2 * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGByteCodeParser.h"
31 #include "ArrayConstructor.h"
32 #include "CallLinkStatus.h"
33 #include "CodeBlock.h"
34 #include "DFGArrayMode.h"
35 #include "DFGByteCodeCache.h"
36 #include "DFGCapabilities.h"
37 #include "GetByIdStatus.h"
38 #include "PutByIdStatus.h"
39 #include "ResolveGlobalStatus.h"
40 #include <wtf/HashMap.h>
41 #include <wtf/MathExtras.h>
43 namespace JSC { namespace DFG {
45 class ConstantBufferKey {
53 ConstantBufferKey(WTF::HashTableDeletedValueType)
59 ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
60 : m_codeBlock(codeBlock)
65 bool operator==(const ConstantBufferKey& other) const
67 return m_codeBlock == other.m_codeBlock
68 && m_index == other.m_index;
73 return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
76 bool isHashTableDeletedValue() const
78 return !m_codeBlock && m_index;
81 CodeBlock* codeBlock() const { return m_codeBlock; }
82 unsigned index() const { return m_index; }
85 CodeBlock* m_codeBlock;
89 struct ConstantBufferKeyHash {
90 static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
91 static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
96 static const bool safeToCompareToEmptyOrDeleted = true;
99 } } // namespace JSC::DFG
103 template<typename T> struct DefaultHash;
104 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
105 typedef JSC::DFG::ConstantBufferKeyHash Hash;
108 template<typename T> struct HashTraits;
109 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
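// These WTF specializations are what allow ConstantBufferKey to be used as a HashMap key;
// the parser keeps a HashMap<ConstantBufferKey, unsigned> (m_constantBufferCache, below),
// presumably so that constant buffers from inlined code blocks can be remapped to entries
// in the machine code block without duplicating them.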
113 namespace JSC { namespace DFG {
115 // === ByteCodeParser ===
117 // This class is used to compile the dataflow graph from a CodeBlock.
118 class ByteCodeParser {
120 ByteCodeParser(ExecState* exec, Graph& graph)
122 , m_globalData(&graph.m_globalData)
123 , m_codeBlock(graph.m_codeBlock)
124 , m_profiledBlock(graph.m_profiledBlock)
128 , m_currentProfilingIndex(0)
129 , m_constantUndefined(UINT_MAX)
130 , m_constantNull(UINT_MAX)
131 , m_constantNaN(UINT_MAX)
132 , m_constant1(UINT_MAX)
133 , m_constants(m_codeBlock->numberOfConstantRegisters())
134 , m_numArguments(m_codeBlock->numParameters())
135 , m_numLocals(m_codeBlock->m_numCalleeRegisters)
136 , m_preservedVars(m_codeBlock->m_numVars)
137 , m_parameterSlots(0)
138 , m_numPassedVarArgs(0)
139 , m_globalResolveNumber(0)
140 , m_inlineStackTop(0)
141 , m_haveBuiltOperandMaps(false)
142 , m_emptyJSValueIndex(UINT_MAX)
143 , m_currentInstruction(0)
145 ASSERT(m_profiledBlock);
147 for (int i = 0; i < m_codeBlock->m_numVars; ++i)
148 m_preservedVars.set(i);
151 // Parse a full CodeBlock of bytecode.
155 // Just parse from m_currentIndex to the end of the current CodeBlock.
156 void parseCodeBlock();
158 // Helper for min and max.
159 bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
161 // Handle calls. This resolves issues surrounding inlining and intrinsics.
162 void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
163 void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
164 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
165 bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
166 // Handle setting the result of an intrinsic.
167 void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
168 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
169 bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
170 bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
171 NodeIndex handleGetByOffset(SpeculatedType, NodeIndex base, unsigned identifierNumber, PropertyOffset);
172 void handleGetByOffset(
173 int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
176 int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
177 const GetByIdStatus&);
179 NodeIndex getScope(bool skipTop, unsigned skipCount);
181 // Convert a set of ResolveOperations into graph nodes
182 bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value);
184 // Prepare to parse a block.
185 void prepareToParseBlock();
186 // Parse a single basic block of bytecode instructions.
187 bool parseBlock(unsigned limit);
188 // Link block successors.
189 void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
190 void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
191 // Link GetLocal & SetLocal nodes, to ensure live values are generated.
196 template<PhiStackType stackType>
197 void processPhiStack();
199 void fixVariableAccessPredictions();
200 // Add spill locations to nodes.
201 void allocateVirtualRegisters();
203 VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
205 ASSERT(operand < FirstConstantRegisterIndex);
207 m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
208 return &m_graph.m_variableAccessData.last();
211 // Get/Set the operands/result of a bytecode instruction.
212 NodeIndex getDirect(int operand)
214 // Is this a constant?
215 if (operand >= FirstConstantRegisterIndex) {
216 unsigned constant = operand - FirstConstantRegisterIndex;
217 ASSERT(constant < m_constants.size());
218 return getJSConstant(constant);
221 if (operand == JSStack::Callee)
224 // Is this an argument?
225 if (operandIsArgument(operand))
226 return getArgument(operand);
229 return getLocal((unsigned)operand);
231 NodeIndex get(int operand)
233 return getDirect(m_inlineStackTop->remapOperand(operand));
235 enum SetMode { NormalSet, SetOnEntry };
236 void setDirect(int operand, NodeIndex value, SetMode setMode = NormalSet)
238 // Is this an argument?
239 if (operandIsArgument(operand)) {
240 setArgument(operand, value, setMode);
245 setLocal((unsigned)operand, value, setMode);
247 void set(int operand, NodeIndex value, SetMode setMode = NormalSet)
249 setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
252 void setPair(int operand1, NodeIndex value1, int operand2, NodeIndex value2)
254 // First emit dead SetLocals for the benefit of OSR.
255 set(operand1, value1);
256 set(operand2, value2);
258 // Now emit the real SetLocals.
259 set(operand1, value1);
260 set(operand2, value2);
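        // (Presumably the dead SetLocals ensure that, should an OSR exit occur between the
        // two stores, both operands already have values flushed for this bytecode; the
        // second pair of SetLocals then becomes the live definitions.)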
263 NodeIndex injectLazyOperandSpeculation(NodeIndex nodeIndex)
265 Node& node = m_graph[nodeIndex];
266 ASSERT(node.op() == GetLocal);
267 ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
268 SpeculatedType prediction =
269 m_inlineStackTop->m_lazyOperands.prediction(
270 LazyOperandValueProfileKey(m_currentIndex, node.local()));
271 #if DFG_ENABLE(DEBUG_VERBOSE)
272 dataLog("Lazy operand [@", nodeIndex, ", bc#", m_currentIndex, ", r", node.local(), "] prediction: ", SpeculationDump(prediction), "\n");
274 node.variableAccessData()->predict(prediction);
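        // The prediction comes from the profiled block's lazy operand value profiles, keyed by
        // (bytecode index, local). Merging it into the VariableAccessData gives the later
        // prediction propagation phases a type guess for this GetLocal.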
278 // Used in implementing get/set, above, where the operand is a local variable.
279 NodeIndex getLocal(unsigned operand)
281 NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
282 bool isCaptured = m_codeBlock->isCaptured(operand, m_inlineStackTop->m_inlineCallFrame);
284 if (nodeIndex != NoNode) {
285 Node* nodePtr = &m_graph[nodeIndex];
286 if (nodePtr->op() == Flush) {
287 // Two possibilities: either the block wants the local to be live
288 // but has not loaded its value, or it has loaded its value, in
289 // which case we're done.
290 nodeIndex = nodePtr->child1().index();
291 Node& flushChild = m_graph[nodeIndex];
292 if (flushChild.op() == Phi) {
293 VariableAccessData* variableAccessData = flushChild.variableAccessData();
294 variableAccessData->mergeIsCaptured(isCaptured);
295 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
296 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
299 nodePtr = &flushChild;
302 ASSERT(&m_graph[nodeIndex] == nodePtr);
303 ASSERT(nodePtr->op() != Flush);
305 nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
308 // We wish to use the same variable access data as the previous access,
309 // but for all other purposes we want to issue a load since for all we
310 // know, at this stage of compilation, the local has been clobbered.
312 // Make sure we link to the Phi node, not to the GetLocal.
313 if (nodePtr->op() == GetLocal)
314 nodeIndex = nodePtr->child1().index();
316 NodeIndex newGetLocal = injectLazyOperandSpeculation(
317 addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
318 m_currentBlock->variablesAtTail.local(operand) = newGetLocal;
322 if (nodePtr->op() == GetLocal)
324 ASSERT(nodePtr->op() == SetLocal);
325 return nodePtr->child1().index();
328 // Check for reads of temporaries from prior blocks,
329 // and expand m_preservedVars to cover these.
330 m_preservedVars.set(operand);
332 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
334 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
335 m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
336 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
337 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
339 m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
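        // To summarize getLocal(): if the local already has a node at the block tail we reuse
        // its VariableAccessData (looking through Flush and GetLocal to the underlying Phi or
        // SetLocal); otherwise we create a Phi, push it on m_localPhiStack so its inputs can be
        // filled in by processPhiStack(), and load it with a fresh GetLocal.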
343 void setLocal(unsigned operand, NodeIndex value, SetMode setMode = NormalSet)
345 bool isCaptured = m_codeBlock->isCaptured(operand, m_inlineStackTop->m_inlineCallFrame);
347 if (setMode == NormalSet) {
348 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
349 if (isCaptured || argumentPosition)
350 flushDirect(operand, argumentPosition);
353 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
354 variableAccessData->mergeStructureCheckHoistingFailed(
355 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
356 NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
357 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
360 // Used in implementing get/set, above, where the operand is an argument.
361 NodeIndex getArgument(unsigned operand)
363 unsigned argument = operandToArgument(operand);
364 ASSERT(argument < m_numArguments);
366 NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
367 bool isCaptured = m_codeBlock->isCaptured(operand);
369 if (nodeIndex != NoNode) {
370 Node* nodePtr = &m_graph[nodeIndex];
371 if (nodePtr->op() == Flush) {
372 // Two possibilities: either the block wants the local to be live
373 // but has not loaded its value, or it has loaded its value, in
374 // which case we're done.
375 nodeIndex = nodePtr->child1().index();
376 Node& flushChild = m_graph[nodeIndex];
377 if (flushChild.op() == Phi) {
378 VariableAccessData* variableAccessData = flushChild.variableAccessData();
379 variableAccessData->mergeIsCaptured(isCaptured);
380 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
381 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
384 nodePtr = &flushChild;
387 ASSERT(&m_graph[nodeIndex] == nodePtr);
388 ASSERT(nodePtr->op() != Flush);
390 nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
392 if (nodePtr->op() == SetArgument) {
393 // We're getting an argument in the first basic block; link
394 // the GetLocal to the SetArgument.
395 ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
396 VariableAccessData* variable = nodePtr->variableAccessData();
397 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable), nodeIndex));
398 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
403 if (nodePtr->op() == GetLocal)
404 nodeIndex = nodePtr->child1().index();
405 return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
408 if (nodePtr->op() == GetLocal)
411 ASSERT(nodePtr->op() == SetLocal);
412 return nodePtr->child1().index();
415 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
417 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
418 m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
419 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
420 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
422 m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
426 void setArgument(int operand, NodeIndex value, SetMode setMode = NormalSet)
428 unsigned argument = operandToArgument(operand);
429 ASSERT(argument < m_numArguments);
431 bool isCaptured = m_codeBlock->isCaptured(operand);
433 // Always flush arguments, except for 'this'.
434 if (argument && setMode == NormalSet)
435 flushDirect(operand);
437 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
438 variableAccessData->mergeStructureCheckHoistingFailed(
439 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
440 NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
441 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
444 ArgumentPosition* findArgumentPositionForArgument(int argument)
446 InlineStackEntry* stack = m_inlineStackTop;
447 while (stack->m_inlineCallFrame)
448 stack = stack->m_caller;
449 return stack->m_argumentPositions[argument];
452 ArgumentPosition* findArgumentPositionForLocal(int operand)
454 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
455 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
456 if (!inlineCallFrame)
458 if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
460 if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
462 if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
464 int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
465 return stack->m_argumentPositions[argument];
470 ArgumentPosition* findArgumentPosition(int operand)
472 if (operandIsArgument(operand))
473 return findArgumentPositionForArgument(operandToArgument(operand));
474 return findArgumentPositionForLocal(operand);
477 void flush(int operand)
479 flushDirect(m_inlineStackTop->remapOperand(operand));
482 void flushDirect(int operand)
484 flushDirect(operand, findArgumentPosition(operand));
487 void flushDirect(int operand, ArgumentPosition* argumentPosition)
489 // FIXME: This should check if the same operand had already been flushed to
490 // some other local variable.
492 bool isCaptured = m_codeBlock->isCaptured(operand, m_inlineStackTop->m_inlineCallFrame);
494 ASSERT(operand < FirstConstantRegisterIndex);
498 if (operandIsArgument(operand)) {
499 index = operandToArgument(operand);
500 nodeIndex = m_currentBlock->variablesAtTail.argument(index);
503 nodeIndex = m_currentBlock->variablesAtTail.local(index);
504 m_preservedVars.set(operand);
507 if (nodeIndex != NoNode) {
508 Node& node = m_graph[nodeIndex];
511 nodeIndex = node.child1().index();
514 nodeIndex = node.child1().index();
520 ASSERT(m_graph[nodeIndex].op() != Flush
521 && m_graph[nodeIndex].op() != GetLocal);
523 // Emit a Flush regardless of whether we already flushed it.
524 // This gives us guidance to see that the variable also needs to be flushed
525 // for arguments, even if it already had to be flushed for other reasons.
526 VariableAccessData* variableAccessData = node.variableAccessData();
527 variableAccessData->mergeIsCaptured(isCaptured);
528 addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
529 if (argumentPosition)
530 argumentPosition->addVariable(variableAccessData);
534 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
535 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
536 nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
537 if (operandIsArgument(operand)) {
538 m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
539 m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
540 m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
542 m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
543 m_currentBlock->variablesAtTail.local(index) = nodeIndex;
544 m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
546 if (argumentPosition)
547 argumentPosition->addVariable(variableAccessData);
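        // A Flush keeps the operand's current value live so that it is written back to its
        // stack slot; presumably this is what lets captured variables and arguments be observed
        // from outside this compilation (e.g. on OSR exit or through the arguments object).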
550 void flushArgumentsAndCapturedVariables()
553 if (m_inlineStackTop->m_inlineCallFrame)
554 numArguments = m_inlineStackTop->m_inlineCallFrame->arguments.size();
556 numArguments = m_inlineStackTop->m_codeBlock->numParameters();
557 for (unsigned argument = numArguments; argument-- > 1;)
558 flush(argumentToOperand(argument));
559 for (int local = 0; local < m_inlineStackTop->m_codeBlock->m_numVars; ++local) {
560 if (!m_inlineStackTop->m_codeBlock->isCaptured(local))
566 // Get an operand, and perform a ToInt32/ToNumber conversion on it.
567 NodeIndex getToInt32(int operand)
569 return toInt32(get(operand));
572 // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
573 NodeIndex toInt32(NodeIndex index)
575 Node& node = m_graph[index];
577 if (node.hasInt32Result())
580 if (node.op() == UInt32ToNumber)
581 return node.child1().index();
583 // Check for numeric constants boxed as JSValues.
584 if (node.op() == JSConstant) {
585 JSValue v = valueOfJSConstant(index);
587 return getJSConstant(node.constantNumber());
589 return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
592 return addToGraph(ValueToInt32, index);
595 NodeIndex getJSConstantForValue(JSValue constantValue)
597 unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
598 if (constantIndex >= m_constants.size())
599 m_constants.append(ConstantRecord());
601 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
603 return getJSConstant(constantIndex);
606 NodeIndex getJSConstant(unsigned constant)
608 NodeIndex index = m_constants[constant].asJSValue;
612 NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
613 m_constants[constant].asJSValue = resultIndex;
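        // Each constant register gets at most one JSConstant node per graph; the node index is
        // memoized in m_constants so that repeated uses of the same constant share a node.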
617 NodeIndex getCallee()
619 return addToGraph(GetCallee);
622 // Helper functions to get/set the this value.
625 return get(m_inlineStackTop->m_codeBlock->thisRegister());
627 void setThis(NodeIndex value)
629 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
632 // Convenience methods for checking nodes for constants.
633 bool isJSConstant(NodeIndex index)
635 return m_graph[index].op() == JSConstant;
637 bool isInt32Constant(NodeIndex nodeIndex)
639 return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
641 // Convenience methods for getting constant values.
642 JSValue valueOfJSConstant(NodeIndex index)
644 ASSERT(isJSConstant(index));
645 return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
647 int32_t valueOfInt32Constant(NodeIndex nodeIndex)
649 ASSERT(isInt32Constant(nodeIndex));
650 return valueOfJSConstant(nodeIndex).asInt32();
653 // This method returns a JSConstant with the value 'undefined'.
654 NodeIndex constantUndefined()
656 // Has m_constantUndefined been set up yet?
657 if (m_constantUndefined == UINT_MAX) {
658 // Search the constant pool for undefined; if we find it, we can just reuse it.
659 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
660 for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
661 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
662 if (testMe.isUndefined())
663 return getJSConstant(m_constantUndefined);
666 // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
667 ASSERT(m_constants.size() == numberOfConstants);
668 m_codeBlock->addConstant(jsUndefined());
669 m_constants.append(ConstantRecord());
670 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
673 // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
674 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
675 return getJSConstant(m_constantUndefined);
678 // This method returns a JSConstant with the value 'null'.
679 NodeIndex constantNull()
681 // Has m_constantNull been set up yet?
682 if (m_constantNull == UINT_MAX) {
683 // Search the constant pool for null; if we find it, we can just reuse it.
684 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
685 for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
686 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
687 if (testMe.isNull())
688 return getJSConstant(m_constantNull);
691 // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
692 ASSERT(m_constants.size() == numberOfConstants);
693 m_codeBlock->addConstant(jsNull());
694 m_constants.append(ConstantRecord());
695 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
698 // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
699 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
700 return getJSConstant(m_constantNull);
703 // This method returns a JSConstant with the integer value 1.
706 // Has m_constant1 been set up yet?
707 if (m_constant1 == UINT_MAX) {
708 // Search the constant pool for the value 1; if we find it, we can just reuse it.
709 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
710 for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
711 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
712 if (testMe.isInt32() && testMe.asInt32() == 1)
713 return getJSConstant(m_constant1);
716 // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
717 ASSERT(m_constants.size() == numberOfConstants);
718 m_codeBlock->addConstant(jsNumber(1));
719 m_constants.append(ConstantRecord());
720 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
723 // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
724 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
725 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
726 return getJSConstant(m_constant1);
729 // This method returns a JSConstant with the value NaN.
730 NodeIndex constantNaN()
732 JSValue nan = jsNaN();
734 // Has m_constantNaN been set up yet?
735 if (m_constantNaN == UINT_MAX) {
736 // Search the constant pool for the value NaN; if we find it, we can just reuse it.
737 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
738 for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
739 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
740 if (JSValue::encode(testMe) == JSValue::encode(nan))
741 return getJSConstant(m_constantNaN);
744 // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
745 ASSERT(m_constants.size() == numberOfConstants);
746 m_codeBlock->addConstant(nan);
747 m_constants.append(ConstantRecord());
748 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
751 // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
752 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
753 ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
754 return getJSConstant(m_constantNaN);
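        // The constantUndefined/constantNull/constant-1/constantNaN helpers all follow the same
        // pattern: lazily locate (or append) the value in the CodeBlock's constant pool, cache
        // its constant-pool index, and hand out the shared JSConstant node for it.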
757 NodeIndex cellConstant(JSCell* cell)
759 HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
760 if (result.isNewEntry)
761 result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
763 return result.iterator->value;
766 CodeOrigin currentCodeOrigin()
768 return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
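        // A CodeOrigin records the bytecode index, the inline call frame (if any), and how far
        // the value-profiling index runs ahead of the instruction itself (m_currentProfilingIndex
        // can point past m_currentIndex, e.g. at an op_call followed by op_call_put_result).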
771 // These methods create a node and add it to the graph. If nodes of this type are
772 // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
773 NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
775 NodeIndex resultIndex = (NodeIndex)m_graph.size();
776 m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
778 m_currentBlock->append(resultIndex);
780 if (defaultFlags(op) & NodeMustGenerate)
781 m_graph.ref(resultIndex);
784 NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
786 NodeIndex resultIndex = (NodeIndex)m_graph.size();
787 m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
789 m_currentBlock->phis.append(resultIndex);
791 m_currentBlock->append(resultIndex);
793 if (defaultFlags(op) & NodeMustGenerate)
794 m_graph.ref(resultIndex);
797 NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
799 NodeIndex resultIndex = (NodeIndex)m_graph.size();
800 m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
802 m_currentBlock->append(resultIndex);
804 if (defaultFlags(op) & NodeMustGenerate)
805 m_graph.ref(resultIndex);
809 NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
811 NodeIndex resultIndex = (NodeIndex)m_graph.size();
812 m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
814 m_currentBlock->append(resultIndex);
816 m_numPassedVarArgs = 0;
818 if (defaultFlags(op) & NodeMustGenerate)
819 m_graph.ref(resultIndex);
823 NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
825 NodeIndex resultIndex = (NodeIndex)m_graph.size();
826 m_graph.append(Node(Phi, currentCodeOrigin(), info));
827 block->phis.append(resultIndex);
832 void addVarArgChild(NodeIndex child)
834 m_graph.m_varArgChildren.append(Edge(child));
835 m_numPassedVarArgs++;
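        // Variadic nodes do not store their children inline: addVarArgChild() appends each child
        // to m_graph.m_varArgChildren, and the Node::VarArg form of addToGraph() above records the
        // (first child, count) pair before resetting m_numPassedVarArgs for the next such node.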
838 NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
840 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
842 SpeculatedType prediction = SpecNone;
843 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
844 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
845 prediction = getPrediction();
848 addVarArgChild(get(currentInstruction[1].u.operand));
849 int argCount = currentInstruction[2].u.operand;
850 if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
851 m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
853 int registerOffset = currentInstruction[3].u.operand;
854 int dummyThisArgument = op == Call ? 0 : 1;
855 for (int i = 0 + dummyThisArgument; i < argCount; ++i)
856 addVarArgChild(get(registerOffset + argumentToOperand(i)));
858 NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
859 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
860 set(putInstruction[1].u.operand, call);
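        // For op_call/op_construct, operand 1 is the callee, operand 2 the argument count
        // (including 'this') and operand 3 the register offset of the callee frame; the result,
        // if any, is delivered by a following op_call_put_result instruction.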
864 NodeIndex addStructureTransitionCheck(JSCell* object, Structure* structure)
866 // Add a weak JS constant for the object regardless, since the code should
867 // be jettisoned if the object ever dies.
868 NodeIndex objectIndex = cellConstant(object);
870 if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
871 addToGraph(StructureTransitionWatchpoint, OpInfo(structure), objectIndex);
875 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectIndex);
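        // If the object already has the expected structure and its transition watchpoint set is
        // still valid, a StructureTransitionWatchpoint suffices: no runtime check is emitted, and
        // the compiled code is jettisoned if the structure ever transitions. Otherwise we fall
        // back to an explicit CheckStructure against a singleton structure set.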
880 NodeIndex addStructureTransitionCheck(JSCell* object)
882 return addStructureTransitionCheck(object, object->structure());
885 SpeculatedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
887 UNUSED_PARAM(nodeIndex);
889 SpeculatedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
890 #if DFG_ENABLE(DEBUG_VERBOSE)
891 dataLog("Dynamic [@", nodeIndex, ", bc#", bytecodeIndex, "] prediction: ", SpeculationDump(prediction), "\n");
897 SpeculatedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
899 SpeculatedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
901 if (prediction == SpecNone) {
902 // We have no information about what values this node generates. Give up
903 // on executing this code, since we're likely to do more damage than good.
904 addToGraph(ForceOSRExit);
910 SpeculatedType getPredictionWithoutOSRExit()
912 return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
915 SpeculatedType getPrediction()
917 return getPrediction(m_graph.size(), m_currentProfilingIndex);
920 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
922 profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
923 return ArrayMode::fromObserved(profile, action, false);
926 ArrayMode getArrayMode(ArrayProfile* profile)
928 return getArrayMode(profile, Array::Read);
931 ArrayMode getArrayModeAndEmitChecks(ArrayProfile* profile, Array::Action action, NodeIndex base)
933 profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
935 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
936 if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles())
937 dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
938 dataLogF("Array profile for bc#%u: %p%s%s, %u\n", m_currentIndex, profile->expectedStructure(), profile->structureIsPolymorphic() ? " (polymorphic)" : "", profile->mayInterceptIndexedAccesses() ? " (may intercept)" : "", profile->observedArrayModes());
942 m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
943 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, OutOfBounds);
945 ArrayMode result = ArrayMode::fromObserved(profile, action, makeSafe);
947 if (profile->hasDefiniteStructure() && result.benefitsFromStructureCheck())
948 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(profile->expectedStructure())), base);
953 NodeIndex makeSafe(NodeIndex nodeIndex)
955 Node& node = m_graph[nodeIndex];
957 bool likelyToTakeSlowCase;
958 if (!isX86() && node.op() == ArithMod)
959 likelyToTakeSlowCase = false;
961 likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
963 if (!likelyToTakeSlowCase
964 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
965 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
968 switch (m_graph[nodeIndex].op()) {
974 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
975 m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
979 if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
980 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
981 #if DFG_ENABLE(DEBUG_VERBOSE)
982 dataLogF("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
984 m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
985 } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
986 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
987 #if DFG_ENABLE(DEBUG_VERBOSE)
988 dataLogF("Making ArithMul @%u take faster slow case.\n", nodeIndex);
990 m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
995 ASSERT_NOT_REACHED();
1002 NodeIndex makeDivSafe(NodeIndex nodeIndex)
1004 ASSERT(m_graph[nodeIndex].op() == ArithDiv);
1006 // The main slow case counter for op_div in the old JIT counts only when
1007 // the operands are not numbers. We don't care about that since we already
1008 // have speculations in place that take care of that separately. We only
1009 // care about when the outcome of the division is not an integer, which
1010 // is what the special fast case counter tells us.
1012 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
1013 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
1014 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1017 #if DFG_ENABLE(DEBUG_VERBOSE)
1018 dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
1021 // FIXME: It might be possible to make this more granular. The DFG certainly can
1022 // distinguish between negative zero and overflow in its exit profiles.
1023 m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
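        // makeSafe() and makeDivSafe() consult the old JIT's slow-case counters and prior OSR exit
        // sites; if overflow or negative zero has ever been observed they tag the arithmetic node
        // with NodeMayOverflow / NodeMayNegZero, so that later phases choose a code path that
        // checks for those cases rather than speculating that they cannot occur.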
1028 bool willNeedFlush(StructureStubInfo& stubInfo)
1030 PolymorphicAccessStructureList* list;
1032 switch (stubInfo.accessType) {
1033 case access_get_by_id_self_list:
1034 list = stubInfo.u.getByIdSelfList.structureList;
1035 listSize = stubInfo.u.getByIdSelfList.listSize;
1037 case access_get_by_id_proto_list:
1038 list = stubInfo.u.getByIdProtoList.structureList;
1039 listSize = stubInfo.u.getByIdProtoList.listSize;
1044 for (int i = 0; i < listSize; ++i) {
1045 if (!list->list[i].isDirect)
1051 bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
1056 if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
1059 for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
1060 if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
1067 void buildOperandMapsIfNecessary();
1070 JSGlobalData* m_globalData;
1071 CodeBlock* m_codeBlock;
1072 CodeBlock* m_profiledBlock;
1075 // The current block being generated.
1076 BasicBlock* m_currentBlock;
1077 // The bytecode index of the current instruction being generated.
1078 unsigned m_currentIndex;
1079 // The bytecode index of the value profile of the current instruction being generated.
1080 unsigned m_currentProfilingIndex;
1082 // We use these values during code generation, and to avoid the need for
1083 // special handling we make sure they are available as constants in the
1084 // CodeBlock's constant pool. These variables are initialized to
1085 // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
1086 // constant pool, as necessary.
1087 unsigned m_constantUndefined;
1088 unsigned m_constantNull;
1089 unsigned m_constantNaN;
1090 unsigned m_constant1;
1091 HashMap<JSCell*, unsigned> m_cellConstants;
1092 HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
1094 // A constant in the constant pool may be represented by more than one
1095 // node in the graph, depending on the context in which it is being used.
1096 struct ConstantRecord {
1105 NodeIndex asNumeric;
1106 NodeIndex asJSValue;
1109 // Track the index of the node whose result is the current value for every
1110 // register value in the bytecode - argument, local, and temporary.
1111 Vector<ConstantRecord, 16> m_constants;
1113 // The number of arguments passed to the function.
1114 unsigned m_numArguments;
1115 // The number of locals (vars + temporaries) used in the function.
1116 unsigned m_numLocals;
1117 // The set of registers we need to preserve across BasicBlock boundaries;
1118 // typically equal to the set of vars, but we expand this to cover all
1119 // temporaries that persist across blocks (due to ?:, &&, ||, etc.).
1120 BitVector m_preservedVars;
1121 // The number of slots (in units of sizeof(Register)) that we need to
1122 // preallocate for calls emanating from this frame. This includes the
1123 // size of the CallFrame, only if this is not a leaf function. (I.e.
1124 // this is 0 if and only if this function is a leaf.)
1125 unsigned m_parameterSlots;
1126 // The number of var args passed to the next var arg node.
1127 unsigned m_numPassedVarArgs;
1128 // The index in the global resolve info.
1129 unsigned m_globalResolveNumber;
1131 struct PhiStackEntry {
1132 PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
1139 BasicBlock* m_block;
1143 Vector<PhiStackEntry, 16> m_argumentPhiStack;
1144 Vector<PhiStackEntry, 16> m_localPhiStack;
1146 HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
1148 struct InlineStackEntry {
1149 ByteCodeParser* m_byteCodeParser;
1151 CodeBlock* m_codeBlock;
1152 CodeBlock* m_profiledBlock;
1153 InlineCallFrame* m_inlineCallFrame;
1154 VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
1156 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1158 QueryableExitProfile m_exitProfile;
1160 // Remapping of identifier and constant numbers from the code block being
1161 // inlined (inline callee) to the code block that we're inlining into
1162 // (the machine code block, which is the transitive, though not necessarily direct, caller).
1164 Vector<unsigned> m_identifierRemap;
1165 Vector<unsigned> m_constantRemap;
1166 Vector<unsigned> m_constantBufferRemap;
1167 Vector<unsigned> m_resolveOperationRemap;
1168 Vector<unsigned> m_putToBaseOperationRemap;
1170 // Blocks introduced by this code block, which need successor linking.
1171 // May include up to one basic block that includes the continuation after
1172 // the callsite in the caller. These must be appended in the order that they
1173 // are created, but their bytecodeBegin values need not be in order, as they are not used as lookup targets.
1175 Vector<UnlinkedBlock> m_unlinkedBlocks;
1177 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1178 // cannot have two blocks that have the same bytecodeBegin. For this very
1179 // reason, this is not equivalent to m_unlinkedBlocks.
1180 Vector<BlockIndex> m_blockLinkingTargets;
1182 // If the callsite's basic block was split into two, then this will be
1183 // the head of the callsite block. It needs its successors linked to the
1184 // m_unlinkedBlocks, but not the other way around: there's no way for
1185 // any blocks in m_unlinkedBlocks to jump back into this block.
1186 BlockIndex m_callsiteBlockHead;
1188 // Does the callsite block head need linking? This is typically true
1189 // but will be false for the machine code block's inline stack entry
1190 // (since that one is not inlined) and for cases where an inline callee
1191 // did the linking for us.
1192 bool m_callsiteBlockHeadNeedsLinking;
1194 VirtualRegister m_returnValue;
1196 // Speculations about variable types collected from the profiled code block,
1197 // which are based on OSR exit profiles that past DFG compilations of this
1198 // code block had gathered.
1199 LazyOperandValueProfileParser m_lazyOperands;
1201 // Did we see any returns? We need to handle the (uncommon but necessary)
1202 // case where a procedure that does not return was inlined.
1203 bool m_didReturn;
1205 // Did we have any early returns?
1206 bool m_didEarlyReturn;
1208 // Pointers to the argument position trackers for this slice of code.
1209 Vector<ArgumentPosition*> m_argumentPositions;
1211 InlineStackEntry* m_caller;
1216 CodeBlock* profiledBlock,
1217 BlockIndex callsiteBlockHead,
1218 VirtualRegister calleeVR,
1220 VirtualRegister returnValueVR,
1221 VirtualRegister inlineCallFrameStart,
1222 int argumentCountIncludingThis,
1223 CodeSpecializationKind);
1227 m_byteCodeParser->m_inlineStackTop = m_caller;
1230 int remapOperand(int operand) const
1232 if (!m_inlineCallFrame)
1235 if (operand >= FirstConstantRegisterIndex) {
1236 int result = m_constantRemap[operand - FirstConstantRegisterIndex];
1237 ASSERT(result >= FirstConstantRegisterIndex);
1241 if (operand == JSStack::Callee)
1244 return operand + m_inlineCallFrame->stackOffset;
1248 InlineStackEntry* m_inlineStackTop;
1250 // Have we built operand maps? We initialize them lazily, and only when doing inlining.
1252 bool m_haveBuiltOperandMaps;
1253 // Mapping between identifier names and numbers.
1254 IdentifierMap m_identifierMap;
1255 // Mapping between values and constant numbers.
1256 JSValueMap m_jsValueMap;
1257 // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
1258 // work-around for the fact that JSValueMap can't handle "empty" values.
1259 unsigned m_emptyJSValueIndex;
1261 // Cache of code blocks that we've generated bytecode for.
1262 ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
1264 Instruction* m_currentInstruction;
1267 #define NEXT_OPCODE(name) \
1268 m_currentIndex += OPCODE_LENGTH(name); \
1271 #define LAST_OPCODE(name) \
1272 m_currentIndex += OPCODE_LENGTH(name); \
1273 return shouldContinueParsing
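// NEXT_OPCODE advances m_currentIndex past the current instruction before moving on to the next
// iteration of parseBlock()'s dispatch loop; LAST_OPCODE does the same bookkeeping but returns
// shouldContinueParsing, terminating the current basic block.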
1276 void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
1278 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1280 NodeIndex callTarget = get(currentInstruction[1].u.operand);
1283 ConstantInternalFunction,
1288 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1289 m_inlineStackTop->m_profiledBlock, m_currentIndex);
1291 #if DFG_ENABLE(DEBUG_VERBOSE)
1292 dataLogF("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
1293 if (callLinkStatus.isSet()) {
1294 if (callLinkStatus.couldTakeSlowPath())
1295 dataLogF("could take slow path, ");
1296 dataLogF("target = %p\n", callLinkStatus.callTarget());
1298 dataLogF("not set.\n");
1301 if (m_graph.isFunctionConstant(callTarget)) {
1302 callType = ConstantFunction;
1303 #if DFG_ENABLE(DEBUG_VERBOSE)
1304 dataLogF("Call at [@%lu, bc#%u] has a function constant: %p, exec %p.\n",
1305 m_graph.size(), m_currentIndex,
1306 m_graph.valueOfFunctionConstant(callTarget),
1307 m_graph.valueOfFunctionConstant(callTarget)->executable());
1309 } else if (m_graph.isInternalFunctionConstant(callTarget)) {
1310 callType = ConstantInternalFunction;
1311 #if DFG_ENABLE(DEBUG_VERBOSE)
1312 dataLogF("Call at [@%lu, bc#%u] has an internal function constant: %p.\n",
1313 m_graph.size(), m_currentIndex,
1314 m_graph.valueOfInternalFunctionConstant(callTarget));
1316 } else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
1317 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1318 callType = LinkedFunction;
1319 #if DFG_ENABLE(DEBUG_VERBOSE)
1320 dataLogF("Call at [@%lu, bc#%u] is linked to: %p, exec %p.\n",
1321 m_graph.size(), m_currentIndex, callLinkStatus.callTarget(),
1322 callLinkStatus.callTarget()->executable());
1325 callType = UnknownFunction;
1326 #if DFG_ENABLE(DEBUG_VERBOSE)
1327 dataLogF("Call at [@%lu, bc#%u] is has an unknown or ambiguous target.\n",
1328 m_graph.size(), m_currentIndex);
1331 if (callType != UnknownFunction) {
1332 int argumentCountIncludingThis = currentInstruction[2].u.operand;
1333 int registerOffset = currentInstruction[3].u.operand;
1335 // Do we have a result?
1336 bool usesResult = false;
1337 int resultOperand = 0; // make compiler happy
1338 unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
1339 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
1340 SpeculatedType prediction = SpecNone;
1341 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
1342 resultOperand = putInstruction[1].u.operand;
1344 m_currentProfilingIndex = nextOffset;
1345 prediction = getPrediction();
1346 nextOffset += OPCODE_LENGTH(op_call_put_result);
1349 if (callType == ConstantInternalFunction) {
1350 if (handleConstantInternalFunction(usesResult, resultOperand, m_graph.valueOfInternalFunctionConstant(callTarget), registerOffset, argumentCountIncludingThis, prediction, kind))
1353 // Can only handle this using the generic call handler.
1354 addCall(interpreter, currentInstruction, op);
1358 JSFunction* expectedFunction;
1359 Intrinsic intrinsic;
1360 bool certainAboutExpectedFunction;
1361 if (callType == ConstantFunction) {
1362 expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
1363 intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1364 certainAboutExpectedFunction = true;
1366 ASSERT(callType == LinkedFunction);
1367 expectedFunction = callLinkStatus.callTarget();
1368 intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1369 certainAboutExpectedFunction = false;
1372 if (intrinsic != NoIntrinsic) {
1373 if (!certainAboutExpectedFunction)
1374 emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
1376 if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1377 if (!certainAboutExpectedFunction) {
1378 // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
1379 // to, since at this point we know that the call target is a constant. It's just that OSR isn't
1380 // smart enough to figure that out, since it doesn't understand CheckFunction.
1381 addToGraph(Phantom, callTarget);
1386 } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
1390 addCall(interpreter, currentInstruction, op);
1393 void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
1395 NodeIndex thisArgument;
1396 if (kind == CodeForCall)
1397 thisArgument = get(registerOffset + argumentToOperand(0));
1399 thisArgument = NoNode;
1400 addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
1403 bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
1405 // First, the really simple checks: do we have an actual JS function?
1406 if (!expectedFunction)
1408 if (expectedFunction->isHostFunction())
1411 FunctionExecutable* executable = expectedFunction->jsExecutable();
1413 // Does the number of arguments we're passing match the arity of the target? We currently
1414 // inline only if the number of arguments passed is greater than or equal to the number
1415 // of arguments expected.
1416 if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
1419 // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
1420 // If either of these is detected, then don't inline.
1422 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1424 if (depth >= Options::maximumInliningDepth())
1425 return false; // Depth exceeded.
1427 if (entry->executable() == executable)
1428 return false; // Recursion detected.
1431 // Does the code block's size match the heuristics/requirements for being
1432 // an inline candidate?
1433 CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
1437 if (!mightInlineFunctionFor(profiledBlock, kind))
1440 // If we get here then it looks like we should definitely inline this code. Proceed
1441 // with parsing the code to get bytecode, so that we can then parse the bytecode.
1442 // Note that if LLInt is enabled, the bytecode will always be available. Also note
1443 // that if LLInt is enabled, we may inline a code block that has never been JITted.
1445 CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
1449 ASSERT(canInlineFunctionFor(codeBlock, kind));
1451 #if DFG_ENABLE(DEBUG_VERBOSE)
1452 dataLogF("Inlining executable %p.\n", executable);
1455 // Now we know without a doubt that we are committed to inlining. So begin the process
1456 // by checking the callee (if necessary) and making sure that arguments and the callee are flushed.
1458 if (!certainAboutExpectedFunction)
1459 emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
1461 // FIXME: Don't flush constants!
1463 int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
1465 // Make sure that the area used by the call frame is reserved.
1466 for (int arg = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
1467 m_preservedVars.set(arg);
1469 // Make sure that we have enough locals.
1470 unsigned newNumLocals = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
1471 if (newNumLocals > m_numLocals) {
1472 m_numLocals = newNumLocals;
1473 for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
1474 m_graph.m_blocks[i]->ensureLocals(newNumLocals);
1477 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1479 InlineStackEntry inlineStackEntry(
1480 this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1,
1481 (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction,
1482 (VirtualRegister)m_inlineStackTop->remapOperand(
1483 usesResult ? resultOperand : InvalidVirtualRegister),
1484 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1486 // This is where the actual inlining really happens.
1487 unsigned oldIndex = m_currentIndex;
1488 unsigned oldProfilingIndex = m_currentProfilingIndex;
1490 m_currentProfilingIndex = 0;
1492 addToGraph(InlineStart, OpInfo(argumentPositionStart));
1496 m_currentIndex = oldIndex;
1497 m_currentProfilingIndex = oldProfilingIndex;
1499 // If the inlined code created some new basic blocks, then we have linking to do.
1500 if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
1502 ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1503 if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1504 linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
1506 ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
1508 // It's possible that the callsite block head is not owned by the caller.
1509 if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1510 // It's definitely owned by the caller, because the caller created new blocks.
1511 // Assert that this all adds up.
1512 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
1513 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1514 inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1516 // It's definitely not owned by the caller. Tell the caller that it does not
1517 // need to link its callsite block head, because we already did it.
1518 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1519 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1520 inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1523 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1525 ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1527 // If there was a return, but no early returns, then we're done. We allow parsing of
1528 // the caller to continue in whatever basic block we're in right now.
1529 if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1530 BasicBlock* lastBlock = m_graph.m_blocks.last().get();
1531 ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());
1533 // If we created new blocks then the last block needs linking, but in the
1534 // caller. It doesn't need to be linked to, but it needs outgoing links.
1535 if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1536 #if DFG_ENABLE(DEBUG_VERBOSE)
1537 dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
1539 // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1540 // for release builds because this block will never serve as a potential target
1541 // in the linker's binary search.
1542 lastBlock->bytecodeBegin = m_currentIndex;
1543 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
1546 m_currentBlock = m_graph.m_blocks.last().get();
1548 #if DFG_ENABLE(DEBUG_VERBOSE)
1549 dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
1554 // If we get to this point then all blocks must end in some sort of terminal.
1555 ASSERT(m_graph.last().isTerminal());
1557 // Link the early returns to the basic block we're about to create.
1558 for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1559 if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1561 BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
1562 ASSERT(!block->isLinked);
1563 Node& node = m_graph[block->last()];
1564 ASSERT(node.op() == Jump);
1565 ASSERT(node.takenBlockIndex() == NoBlock);
1566 node.setTakenBlockIndex(m_graph.m_blocks.size());
1567 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1568 #if !ASSERT_DISABLED
1569 block->isLinked = true;
1573 // Need to create a new basic block for the continuation at the caller.
1574 OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
1575 #if DFG_ENABLE(DEBUG_VERBOSE)
1576 dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
1578 m_currentBlock = block.get();
1579 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
1580 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
1581 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
1582 m_graph.m_blocks.append(block.release());
1583 prepareToParseBlock();
1585 // At this point we return and continue to generate code for the caller, but
1586 // in the new basic block.
1587 #if DFG_ENABLE(DEBUG_VERBOSE)
1588 dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
1593 void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
1594 {
1595 if (!usesResult)
1596 return;
1597 set(resultOperand, nodeIndex);
1598 }
1600 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1602 if (argumentCountIncludingThis == 1) { // Math.min()
1603 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1607 if (argumentCountIncludingThis == 2) { // Math.min(x)
1608 // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
1609 NodeIndex result = get(registerOffset + argumentToOperand(1));
1610 addToGraph(CheckNumber, result);
1611 setIntrinsicResult(usesResult, resultOperand, result);
1615 if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1616 setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
1620 // Don't handle >=3 arguments for now.
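// Illustration of the lowering above (min and max share this helper via the 'op' parameter):
//   Math.min()      -> the NaN constant
//   Math.min(x)     -> CheckNumber(x), then x itself is the result
//   Math.min(x, y)  -> a single ArithMin(x, y) node (ArithMax for Math.max)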
1624 // FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
1625 // they need to perform the ToNumber conversion, which can have side-effects.
1626 bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1628 switch (intrinsic) {
1629 case AbsIntrinsic: {
1630 if (argumentCountIncludingThis == 1) { // Math.abs()
1631 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1635 if (!MacroAssembler::supportsFloatingPointAbs())
1638 NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
1639 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1640 m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
1641 setIntrinsicResult(usesResult, resultOperand, nodeIndex);
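// Note on the Overflow exit-site check above: Math.abs of the int32 minimum
// (-2^31) cannot be represented as an int32, so if this code location has
// already OSR-exited on overflow we conservatively tag the ArithAbs node with
// NodeMayOverflow up front rather than re-learning that the hard way.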
1646 return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1649 return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1651 case SqrtIntrinsic: {
1652 if (argumentCountIncludingThis == 1) { // Math.sqrt()
1653 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1657 if (!MacroAssembler::supportsFloatingPointSqrt())
1660 setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
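// As with abs above, the sqrt intrinsic is only used when the MacroAssembler
// can emit a hardware square root; otherwise the intrinsic is rejected and the
// call presumably stays a plain call to the Math host function.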
1664 case ArrayPushIntrinsic: {
1665 if (argumentCountIncludingThis != 2)
1668 ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
1669 if (!arrayMode.isJSArray())
1671 switch (arrayMode.type()) {
1672 case Array::Undecided:
1675 case Array::Contiguous:
1676 case Array::ArrayStorage: {
1677 NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1679 set(resultOperand, arrayPush);
1689 case ArrayPopIntrinsic: {
1690 if (argumentCountIncludingThis != 1)
1693 ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
1694 if (!arrayMode.isJSArray())
1696 switch (arrayMode.type()) {
1699 case Array::Contiguous:
1700 case Array::ArrayStorage: {
1701 NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
1703 set(resultOperand, arrayPop);
1712 case CharCodeAtIntrinsic: {
1713 if (argumentCountIncludingThis != 2)
1716 int thisOperand = registerOffset + argumentToOperand(0);
1717 int indexOperand = registerOffset + argumentToOperand(1);
1718 NodeIndex charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
1721 set(resultOperand, charCode);
1725 case CharAtIntrinsic: {
1726 if (argumentCountIncludingThis != 2)
1729 int thisOperand = registerOffset + argumentToOperand(0);
1730 int indexOperand = registerOffset + argumentToOperand(1);
1731 NodeIndex charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
1734 set(resultOperand, charCode);
1738 case RegExpExecIntrinsic: {
1739 if (argumentCountIncludingThis != 2)
1742 NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1744 set(resultOperand, regExpExec);
1749 case RegExpTestIntrinsic: {
1750 if (argumentCountIncludingThis != 2)
1753 NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1755 set(resultOperand, regExpExec);
1765 bool ByteCodeParser::handleConstantInternalFunction(
1766 bool usesResult, int resultOperand, InternalFunction* function, int registerOffset,
1767 int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
1769 // If we ever find that we have a lot of internal functions that we specialize for,
1770 // then we should probably have some sort of hashtable dispatch, or maybe even
1771 // dispatch straight through the MethodTable of the InternalFunction. But for now,
1772 // it seems that this case is hit infrequently enough, and the number of functions
1773 // we know about is small enough, that having just a linear cascade of if statements
1774 // is fine.
1776 UNUSED_PARAM(prediction); // Remove this once we do more things.
1777 UNUSED_PARAM(kind); // Remove this once we do more things.
1779 if (function->classInfo() == &ArrayConstructor::s_info) {
1780 if (argumentCountIncludingThis == 2) {
1782 usesResult, resultOperand,
1783 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(registerOffset + argumentToOperand(1))));
1787 for (int i = 1; i < argumentCountIncludingThis; ++i)
1788 addVarArgChild(get(registerOffset + argumentToOperand(i)));
1790 usesResult, resultOperand,
1791 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
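// In other words, for the Array constructor: 'new Array(len)' (one argument)
// becomes NewArrayWithSize over that argument, while 'new Array(a, b, ...)'
// becomes a var-arg NewArray whose children are the arguments themselves; both
// start out with an undecided indexing type.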
1798 NodeIndex ByteCodeParser::handleGetByOffset(SpeculatedType prediction, NodeIndex base, unsigned identifierNumber, PropertyOffset offset)
1800 NodeIndex propertyStorage;
1801 if (isInlineOffset(offset))
1802 propertyStorage = base;
1804 propertyStorage = addToGraph(GetButterfly, base);
1805 // FIXME: It would be far more efficient for load elimination (and safer from
1806 // an OSR standpoint) if GetByOffset also referenced the object we were loading
1807 // from, and if we could load eliminate a GetByOffset even if the butterfly
1808 // had changed. That would be a great success.
1809 NodeIndex getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);
1811 StorageAccessData storageAccessData;
1812 storageAccessData.offset = indexRelativeToBase(offset);
1813 storageAccessData.identifierNumber = identifierNumber;
1814 m_graph.m_storageAccessData.append(storageAccessData);
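// Property loads thus come in two shapes: for inline offsets the "storage" is
// the object cell itself, while out-of-line offsets first load the butterfly
// pointer via GetButterfly. The GetByOffset node only records an index into
// m_storageAccessData; the actual offset arithmetic presumably happens later,
// when the backend consumes that table.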
1819 void ByteCodeParser::handleGetByOffset(
1820 int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
1821 PropertyOffset offset)
1823 set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
1826 void ByteCodeParser::handleGetById(
1827 int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
1828 const GetByIdStatus& getByIdStatus)
1830 if (!getByIdStatus.isSimple()
1831 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1832 set(destinationOperand,
1834 getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
1835 OpInfo(identifierNumber), OpInfo(prediction), base));
1839 ASSERT(getByIdStatus.structureSet().size());
1841 // The implementation of GetByOffset does not know to terminate speculative
1842 // execution if it doesn't have a prediction, so we do it manually.
1843 if (prediction == SpecNone)
1844 addToGraph(ForceOSRExit);
1846 NodeIndex originalBaseForBaselineJIT = base;
1848 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
1850 if (!getByIdStatus.chain().isEmpty()) {
1851 Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
1852 JSObject* currentObject = 0;
1853 for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
1854 currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
1855 currentStructure = getByIdStatus.chain()[i];
1856 base = addStructureTransitionCheck(currentObject, currentStructure);
1860 // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
1861 // ensure that the base of the original get_by_id is kept alive until we're done with
1862 // all of the speculations. We only insert the Phantom if there had been a CheckStructure
1863 // on something other than the base following the CheckStructure on base, or if the
1864 // access was compiled to a WeakJSConstant specific value, in which case we might not
1865 // have any explicit use of the base at all.
1866 if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
1867 addToGraph(Phantom, originalBaseForBaselineJIT);
1869 if (getByIdStatus.specificValue()) {
1870 ASSERT(getByIdStatus.specificValue().isCell());
1872 set(destinationOperand, cellConstant(getByIdStatus.specificValue().asCell()));
1877 destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
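// Summary of the fast path above: a CheckStructure against the profiled
// structure set, structure-transition checks down the prototype chain when the
// property lives on a prototype, a Phantom to keep the original base alive
// across those speculations, and finally either the known constant cell or a
// plain GetByOffset. Anything else falls back to the generic GetById(Flush).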
1880 void ByteCodeParser::prepareToParseBlock()
1882 for (unsigned i = 0; i < m_constants.size(); ++i)
1883 m_constants[i] = ConstantRecord();
1884 m_cellConstantNodes.clear();
1887 NodeIndex ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
1889 NodeIndex localBase;
1890 if (m_inlineStackTop->m_inlineCallFrame) {
1891 ASSERT(m_inlineStackTop->m_inlineCallFrame->callee);
1892 localBase = cellConstant(m_inlineStackTop->m_inlineCallFrame->callee->scope());
1894 localBase = addToGraph(GetMyScope);
1896 ASSERT(!m_inlineStackTop->m_inlineCallFrame);
1897 localBase = addToGraph(SkipTopScope, localBase);
1899 for (unsigned n = skipCount; n--;)
1900 localBase = addToGraph(SkipScope, localBase);
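// getScope() builds the scope-chain walk explicitly: start from the callee's
// known scope (inlined case) or GetMyScope, optionally hop over the top scope
// with SkipTopScope, then emit one SkipScope node per remaining hop. For
// example, skipCount == 2 yields SkipScope(SkipScope(base)).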
1904 bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value)
1906 ResolveOperations* resolveOperations = m_codeBlock->resolveOperations(operations);
1907 if (resolveOperations->isEmpty()) {
1908 addToGraph(ForceOSRExit);
1911 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
1913 bool skipTop = false;
1914 bool skippedScopes = false;
1915 bool setBase = false;
1916 ResolveOperation* pc = resolveOperations->data();
1917 NodeIndex localBase = 0;
1918 bool resolvingBase = true;
1919 while (resolvingBase) {
1920 switch (pc->m_operation) {
1921 case ResolveOperation::ReturnGlobalObjectAsBase:
1922 *base = cellConstant(globalObject);
1926 case ResolveOperation::SetBaseToGlobal:
1927 *base = cellConstant(globalObject);
1929 resolvingBase = false;
1933 case ResolveOperation::SetBaseToUndefined:
1934 *base = constantUndefined();
1936 resolvingBase = false;
1940 case ResolveOperation::SetBaseToScope:
1941 localBase = getScope(skipTop, skipCount);
1945 resolvingBase = false;
1947 // Reset the scope skipping as we've already loaded it
1948 skippedScopes = false;
1951 case ResolveOperation::ReturnScopeAsBase:
1952 *base = getScope(skipTop, skipCount);
1956 case ResolveOperation::SkipTopScopeNode:
1957 ASSERT(!m_inlineStackTop->m_inlineCallFrame);
1959 skippedScopes = true;
1963 case ResolveOperation::SkipScopes:
1964 skipCount += pc->m_scopesToSkip;
1965 skippedScopes = true;
1969 case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
1972 case ResolveOperation::Fail:
1976 resolvingBase = false;
1980 localBase = getScope(skipTop, skipCount);
1982 if (base && !setBase)
1986 ResolveOperation* resolveValueOperation = pc;
1987 switch (resolveValueOperation->m_operation) {
1988 case ResolveOperation::GetAndReturnGlobalProperty: {
1989 ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
1990 if (status.isSimple()) {
1991 ASSERT(status.structure());
1993 NodeIndex globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());
1995 if (status.specificValue()) {
1996 ASSERT(status.specificValue().isCell());
1997 *value = cellConstant(status.specificValue().asCell());
1999 *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
2003 NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
2004 m_graph.m_resolveGlobalData.append(ResolveGlobalData());
2005 ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
2006 data.identifierNumber = identifier;
2007 data.resolveOperationsIndex = operations;
2008 data.putToBaseOperationIndex = putToBaseOperation;
2009 data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
2013 case ResolveOperation::GetAndReturnGlobalVar: {
2014 *value = addToGraph(GetGlobalVar,
2015 OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
2016 OpInfo(prediction));
2019 case ResolveOperation::GetAndReturnGlobalVarWatchable: {
2020 SpeculatedType prediction = getPrediction();
2022 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2024 Identifier ident = m_codeBlock->identifier(identifier);
2025 SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
2026 if (!entry.couldBeWatched()) {
2027 *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
2031 // The watchpoint is still intact! This means that we will get notified if the
2032 // current value in the global variable changes. So, we can inline that value.
2033 // Moreover, currently we can assume that this value is a JSFunction*, which
2034 // implies that it's a cell. This simplifies things, since in general we'd have
2035 // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
2036 // of having both cases we just assert that the value is a cell.
2038 // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
2039 // register pointer. But CSE tracks effects on global variables by comparing
2040 // register pointers. Because CSE executes multiple times while the backend
2041 // executes once, we use the following performance trade-off:
2042 // - The node refers directly to the register pointer to make CSE super cheap.
2043 // - To perform backend code generation, the node only contains the identifier
2044 // number, from which it is possible to get (via a few average-time O(1)
2045 // lookups) to the WatchpointSet.
2047 addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));
2049 JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
2050 ASSERT(specificValue.isCell());
2051 *value = cellConstant(specificValue.asCell());
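// So a watchable global that has not been reassigned is folded to its current
// value as a cell constant, guarded by the GlobalVarWatchpoint planted above;
// if the variable is later written, the watchpoint fires and this compiled
// code is presumably invalidated rather than seeing the stale constant.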
2054 case ResolveOperation::GetAndReturnScopedVar: {
2055 NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
2056 *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
2066 bool ByteCodeParser::parseBlock(unsigned limit)
2068 bool shouldContinueParsing = true;
2070 Interpreter* interpreter = m_globalData->interpreter;
2071 Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2072 unsigned blockBegin = m_currentIndex;
2074 // If we are the first basic block, introduce markers for arguments. This allows
2075 // us to track if a use of an argument may use the actual argument passed, as
2076 // opposed to using a value we set explicitly.
2077 if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
2078 m_graph.m_arguments.resize(m_numArguments);
2079 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2080 VariableAccessData* variable = newVariableAccessData(
2081 argumentToOperand(argument), m_codeBlock->isCaptured(argumentToOperand(argument)));
2082 variable->mergeStructureCheckHoistingFailed(
2083 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2084 NodeIndex setArgument = addToGraph(SetArgument, OpInfo(variable));
2085 m_graph.m_arguments[argument] = setArgument;
2086 m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
2087 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2092 m_currentProfilingIndex = m_currentIndex;
2094 // Don't extend over jump destinations.
2095 if (m_currentIndex == limit) {
2096 // Ordinarily we want to plant a jump. But refuse to do this if the block is
2097 // empty. This is a special case for inlining, which might otherwise create
2098 // some empty blocks in some cases. When parseBlock() returns with an empty
2099 // block, it will get repurposed instead of creating a new one. Note that this
2100 // logic relies on every bytecode resulting in one or more nodes, which would
2101 // be true anyway except for op_loop_hint, which emits a Phantom to force this
2102 // to be true.
2103 if (!m_currentBlock->isEmpty())
2104 addToGraph(Jump, OpInfo(m_currentIndex));
2106 #if DFG_ENABLE(DEBUG_VERBOSE)
2107 dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
2110 return shouldContinueParsing;
2113 // Switch on the current bytecode opcode.
2114 Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2115 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2116 OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2119 // === Function entry opcodes ===
2122 // Initialize all locals to undefined.
2123 for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2124 set(i, constantUndefined(), SetOnEntry);
2125 NEXT_OPCODE(op_enter);
2127 case op_convert_this: {
2128 NodeIndex op1 = getThis();
2129 if (m_graph[op1].op() != ConvertThis) {
2130 ValueProfile* profile =
2131 m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
2132 profile->computeUpdatedPrediction();
2133 #if DFG_ENABLE(DEBUG_VERBOSE)
2134 dataLogF("[@%lu bc#%u]: profile %p: ", m_graph.size(), m_currentProfilingIndex, profile);
2135 profile->dump(WTF::dataFile());
2136 dataLogF("\n");
2137 #endif
2138 if (profile->m_singletonValueIsTop
2139 || !profile->m_singletonValue
2140 || !profile->m_singletonValue.isCell()
2141 || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
2142 setThis(addToGraph(ConvertThis, op1));
2146 OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
2150 NEXT_OPCODE(op_convert_this);
2153 case op_create_this: {
2154 int calleeOperand = currentInstruction[2].u.operand;
2155 NodeIndex callee = get(calleeOperand);
2156 bool alreadyEmitted = false;
2157 if (m_graph[callee].op() == WeakJSConstant) {
2158 JSCell* cell = m_graph[callee].weakConstant();
2159 ASSERT(cell->inherits(&JSFunction::s_info));
2161 JSFunction* function = jsCast<JSFunction*>(cell);
2162 Structure* inheritorID = function->tryGetKnownInheritorID();
2164 addToGraph(InheritorIDWatchpoint, OpInfo(function));
2165 set(currentInstruction[1].u.operand, addToGraph(NewObject, OpInfo(inheritorID)));
2166 alreadyEmitted = true;
2169 if (!alreadyEmitted)
2170 set(currentInstruction[1].u.operand, addToGraph(CreateThis, callee));
2171 NEXT_OPCODE(op_create_this);
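// op_create_this: when the callee is a known JSFunction whose inheritor-ID
// structure is already cached, we plant an InheritorIDWatchpoint on the
// function and allocate the new object directly with NewObject; otherwise we
// fall back to the generic CreateThis node, which resolves the prototype at
// run time.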
2174 case op_new_object: {
2175 set(currentInstruction[1].u.operand, addToGraph(NewObject, OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->emptyObjectStructure())));
2176 NEXT_OPCODE(op_new_object);
2179 case op_new_array: {
2180 int startOperand = currentInstruction[2].u.operand;
2181 int numOperands = currentInstruction[3].u.operand;
2182 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2183 for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
2184 addVarArgChild(get(operandIdx));
2185 set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2186 NEXT_OPCODE(op_new_array);
2189 case op_new_array_with_size: {
2190 int lengthOperand = currentInstruction[2].u.operand;
2191 ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2192 set(currentInstruction[1].u.operand, addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(lengthOperand)));
2193 NEXT_OPCODE(op_new_array_with_size);
2196 case op_new_array_buffer: {
2197 int startConstant = currentInstruction[2].u.operand;
2198 int numConstants = currentInstruction[3].u.operand;
2199 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2200 NewArrayBufferData data;
2201 data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2202 data.numConstants = numConstants;
2203 data.indexingType = profile->selectIndexingType();
2205 // If this statement has never executed, we'll have the wrong indexing type in the profile.
2206 for (int i = 0; i < numConstants; ++i) {
2208 leastUpperBoundOfIndexingTypeAndValue(
2210 m_codeBlock->constantBuffer(data.startConstant)[i]);
2213 m_graph.m_newArrayBufferData.append(data);
2214 set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2215 NEXT_OPCODE(op_new_array_buffer);
2218 case op_new_regexp: {
2219 set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2220 NEXT_OPCODE(op_new_regexp);
2223 case op_get_callee: {
2224 ValueProfile* profile = currentInstruction[2].u.profile;
2225 profile->computeUpdatedPrediction();
2226 if (profile->m_singletonValueIsTop
2227 || !profile->m_singletonValue
2228 || !profile->m_singletonValue.isCell())
2229 set(currentInstruction[1].u.operand, get(JSStack::Callee));
2231 ASSERT(profile->m_singletonValue.asCell()->inherits(&JSFunction::s_info));
2232 NodeIndex actualCallee = get(JSStack::Callee);
2233 addToGraph(CheckFunction, OpInfo(profile->m_singletonValue.asCell()), actualCallee);
2234 set(currentInstruction[1].u.operand, addToGraph(WeakJSConstant, OpInfo(profile->m_singletonValue.asCell())));
2236 NEXT_OPCODE(op_get_callee);
2239 // === Bitwise operations ===
2242 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2243 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2244 set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
2245 NEXT_OPCODE(op_bitand);
2249 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2250 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2251 set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
2252 NEXT_OPCODE(op_bitor);
2256 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2257 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2258 set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
2259 NEXT_OPCODE(op_bitxor);
2263 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2264 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2265 NodeIndex result;
2266 // Optimize out shifts by zero.
2267 if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
2268 result = op1;
2269 else
2270 result = addToGraph(BitRShift, op1, op2);
2271 set(currentInstruction[1].u.operand, result);
2272 NEXT_OPCODE(op_rshift);
2276 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2277 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2278 NodeIndex result;
2279 // Optimize out shifts by zero.
2280 if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
2281 result = op1;
2282 else
2283 result = addToGraph(BitLShift, op1, op2);
2284 set(currentInstruction[1].u.operand, result);
2285 NEXT_OPCODE(op_lshift);
2289 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2290 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2291 NodeIndex result;
2292 // The result of a zero-extending right shift is treated as an unsigned value.
2293 // This means that if the top bit is set, the result is not in the int32 range,
2294 // and as such must be stored as a double. If the shift amount is a constant,
2295 // we may be able to optimize.
2296 if (isInt32Constant(op2)) {
2297 // If we know we are shifting by a non-zero amount, then since the operation
2298 // zero fills we know the top bit of the result must be zero, and as such the
2299 // result must be within the int32 range. Conversely, if this is a shift by
2300 // zero, then the result may be changed by the conversion to unsigned, but it
2301 // is not necessary to perform the shift!
2302 if (valueOfInt32Constant(op2) & 0x1f)
2303 result = addToGraph(BitURShift, op1, op2);
2304 else
2305 result = makeSafe(addToGraph(UInt32ToNumber, op1));
2306 } else {
2307 // Cannot optimize at this stage; shift & potentially rebox as a double.
2308 result = addToGraph(BitURShift, op1, op2);
2309 result = makeSafe(addToGraph(UInt32ToNumber, result));
2311 set(currentInstruction[1].u.operand, result);
2312 NEXT_OPCODE(op_urshift);
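// Worked example for the constant-shift cases above: in JavaScript,
// (-1) >>> 0 === 4294967295, which does not fit in an int32 and so needs the
// UInt32ToNumber conversion, whereas (-1) >>> 1 === 2147483647, which does
// fit, so the shift result can stay an int32 and no conversion is needed.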
2315 // === Increment/Decrement opcodes ===
2318 unsigned srcDst = currentInstruction[1].u.operand;
2319 NodeIndex op = get(srcDst);
2320 set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
2321 NEXT_OPCODE(op_pre_inc);
2325 unsigned result = currentInstruction[1].u.operand;
2326 unsigned srcDst = currentInstruction[2].u.operand;
2327 ASSERT(result != srcDst); // Required for assumptions we make during OSR.
2328 NodeIndex op = get(srcDst);
2329 setPair(result, op, srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
2330 NEXT_OPCODE(op_post_inc);
2334 unsigned srcDst = currentInstruction[1].u.operand;
2335 NodeIndex op = get(srcDst);
2336 set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
2337 NEXT_OPCODE(op_pre_dec);
2341 unsigned result = currentInstruction[1].u.operand;
2342 unsigned srcDst = currentInstruction[2].u.operand;
2343 NodeIndex op = get(srcDst);
2344 setPair(result, op, srcDst, makeSafe(addToGraph(ArithSub, op, one())));
2345 NEXT_OPCODE(op_post_dec);
2348 // === Arithmetic operations ===
2351 NodeIndex op1 = get(currentInstruction[2].u.operand);
2352 NodeIndex op2 = get(currentInstruction[3].u.operand);
2353 if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
2354 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
2356 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
2357 NEXT_OPCODE(op_add);
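// op_add speculates on the operand kinds: if both inputs are statically known
// to produce numbers we can go straight to ArithAdd; otherwise we emit
// ValueAdd, which must also cover JavaScript's non-numeric '+' behaviour
// (most notably string concatenation).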
2361 NodeIndex op1 = get(currentInstruction[2].u.operand);
2362 NodeIndex op2 = get(currentInstruction[3].u.operand);
2363 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
2364 NEXT_OPCODE(op_sub);
2368 NodeIndex op1 = get(currentInstruction[2].u.operand);
2369 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
2370 NEXT_OPCODE(op_negate);
2374 // Multiply requires that the inputs are not truncated, unfortunately.
2375 NodeIndex op1 = get(currentInstruction[2].u.operand);
2376 NodeIndex op2 = get(currentInstruction[3].u.operand);
2377 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
2378 NEXT_OPCODE(op_mul);
2382 NodeIndex op1 = get(currentInstruction[2].u.operand);
2383 NodeIndex op2 = get(currentInstruction[3].u.operand);
2384 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
2385 NEXT_OPCODE(op_mod);
2389 NodeIndex op1 = get(currentInstruction[2].u.operand);
2390 NodeIndex op2 = get(currentInstruction[3].u.operand);
2391 set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2392 NEXT_OPCODE(op_div);
2395 // === Misc operations ===
2397 #if ENABLE(DEBUG_WITH_BREAKPOINT)
2399 addToGraph(Breakpoint);
2400 NEXT_OPCODE(op_debug);
2403 NodeIndex op = get(currentInstruction[2].u.operand);
2404 set(currentInstruction[1].u.operand, op);
2405 NEXT_OPCODE(op_mov);
2408 case op_check_has_instance:
2409 addToGraph(CheckHasInstance, get(currentInstruction[3].u.operand));
2410 NEXT_OPCODE(op_check_has_instance);
2412 case op_instanceof: {
2413 NodeIndex value = get(currentInstruction[2].u.operand);
2414 NodeIndex prototype = get(currentInstruction[3].u.operand);
2415 set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, prototype));
2416 NEXT_OPCODE(op_instanceof);
2419 case op_is_undefined: {
2420 NodeIndex value = get(currentInstruction[2].u.operand);
2421 set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
2422 NEXT_OPCODE(op_is_undefined);
2425 case op_is_boolean: {
2426 NodeIndex value = get(currentInstruction[2].u.operand);
2427 set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
2428 NEXT_OPCODE(op_is_boolean);
2431 case op_is_number: {
2432 NodeIndex value = get(currentInstruction[2].u.operand);
2433 set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
2434 NEXT_OPCODE(op_is_number);
2437 case op_is_string: {
2438 NodeIndex value = get(currentInstruction[2].u.operand);
2439 set(currentInstruction[1].u.operand, addToGraph(IsString, value));
2440 NEXT_OPCODE(op_is_string);
2443 case op_is_object: {
2444 NodeIndex value = get(currentInstruction[2].u.operand);
2445 set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
2446 NEXT_OPCODE(op_is_object);
2449 case op_is_function: {
2450 NodeIndex value = get(currentInstruction[2].u.operand);
2451 set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
2452 NEXT_OPCODE(op_is_function);
2456 NodeIndex value = get(currentInstruction[2].u.operand);
2457 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
2458 NEXT_OPCODE(op_not);
2461 case op_to_primitive: {
2462 NodeIndex value = get(currentInstruction[2].u.operand);
2463 set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
2464 NEXT_OPCODE(op_to_primitive);
2468 int startOperand = currentInstruction[2].u.operand;
2469 int numOperands = currentInstruction[3].u.operand;
2470 for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
2471 addVarArgChild(get(operandIdx));
2472 set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
2473 NEXT_OPCODE(op_strcat);
2477 NodeIndex op1 = get(currentInstruction[2].u.operand);
2478 NodeIndex op2 = get(currentInstruction[3].u.operand);
2479 set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
2480 NEXT_OPCODE(op_less);
2484 NodeIndex op1 = get(currentInstruction[2].u.operand);
2485 NodeIndex op2 = get(currentInstruction[3].u.operand);
2486 set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
2487 NEXT_OPCODE(op_lesseq);
2491 NodeIndex op1 = get(currentInstruction[2].u.operand);
2492 NodeIndex op2 = get(currentInstruction[3].u.operand);
2493 set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
2494 NEXT_OPCODE(op_greater);
2497 case op_greatereq: {
2498 NodeIndex op1 = get(currentInstruction[2].u.operand);
2499 NodeIndex op2 = get(currentInstruction[3].u.operand);
2500 set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
2501 NEXT_OPCODE(op_greatereq);
2505 NodeIndex op1 = get(currentInstruction[2].u.operand);
2506 NodeIndex op2 = get(currentInstruction[3].u.operand);
2507 set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
2512 NodeIndex value = get(currentInstruction[2].u.operand);
2513 set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
2514 NEXT_OPCODE(op_eq_null);
2518 NodeIndex op1 = get(currentInstruction[2].u.operand);
2519 NodeIndex op2 = get(currentInstruction[3].u.operand);
2520 set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
2521 NEXT_OPCODE(op_stricteq);
2525 NodeIndex op1 = get(currentInstruction[2].u.operand);
2526 NodeIndex op2 = get(currentInstruction[3].u.operand);
2527 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2528 NEXT_OPCODE(op_neq);
2532 NodeIndex value = get(currentInstruction[2].u.operand);
2533 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
2534 NEXT_OPCODE(op_neq_null);
2537 case op_nstricteq: {
2538 NodeIndex op1 = get(currentInstruction[2].u.operand);
2539 NodeIndex op2 = get(currentInstruction[3].u.operand);
2540 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
2541 NEXT_OPCODE(op_nstricteq);
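// Note that the negated comparisons (neq, neq_null, nstricteq) have no nodes
// of their own; they are built as LogicalNot wrapped around CompareEq or
// CompareStrictEq, with the *_null variants comparing against the null
// constant.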
2544 // === Property access operations ===
2546 case op_get_by_val: {
2547 SpeculatedType prediction = getPrediction();
2549 NodeIndex base = get(currentInstruction[2].u.operand);
2550 ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base);
2551 NodeIndex property = get(currentInstruction[3].u.operand);
2552 NodeIndex getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2553 set(currentInstruction[1].u.operand, getByVal);
2555 NEXT_OPCODE(op_get_by_val);
2558 case op_put_by_val: {
2559 NodeIndex base = get(currentInstruction[1].u.operand);
2561 ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base);
2563 NodeIndex property = get(currentInstruction[2].u.operand);
2564 NodeIndex value = get(currentInstruction[3].u.operand);
2566 addVarArgChild(base);
2567 addVarArgChild(property);
2568 addVarArgChild(value);
2569 addVarArgChild(NoNode); // Leave room for property storage.
2570 addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2572 NEXT_OPCODE(op_put_by_val);
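// PutByVal is built as a var-arg node with children base, property, value and
// a NoNode placeholder; the spare slot presumably lets a later phase attach
// the property-storage edge without having to reshape the node's child list.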
2576 case op_get_by_id_out_of_line:
2577 case op_get_array_length: {
2578 SpeculatedType prediction = getPrediction();
2580 NodeIndex base = get(currentInstruction[2].u.operand);
2581 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2583 Identifier identifier = m_codeBlock->identifier(identifierNumber);
2584 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2585 m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
2588 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2590 NEXT_OPCODE(op_get_by_id);
2593 case op_put_by_id_out_of_line:
2594 case op_put_by_id_transition_direct:
2595 case op_put_by_id_transition_normal:
2596 case op_put_by_id_transition_direct_out_of_line:
2597 case op_put_by_id_transition_normal_out_of_line: {
2598 NodeIndex value = get(currentInstruction[3].u.operand);
2599 NodeIndex base = get(currentInstruction[1].u.operand);
2600 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2601 bool direct = currentInstruction[8].u.operand;
2603 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2604 m_inlineStackTop->m_profiledBlock,
2606 m_codeBlock->identifier(identifierNumber));
2607 if (!putByIdStatus.isSet())
2608 addToGraph(ForceOSRExit);
2610 bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
2612 if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
2613 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2614 NodeIndex propertyStorage;
2615 if (isInlineOffset(putByIdStatus.offset()))
2616 propertyStorage = base;
2618 propertyStorage = addToGraph(GetButterfly, base);
2619 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
2621 StorageAccessData storageAccessData;
2622 storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
2623 storageAccessData.identifierNumber = identifierNumber;
2624 m_graph.m_storageAccessData.append(storageAccessData);
2625 } else if (!hasExitSite
2626 && putByIdStatus.isSimpleTransition()
2627 && structureChainIsStillValid(
2629 putByIdStatus.oldStructure(),
2630 putByIdStatus.structureChain())) {
2632 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2634 if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
2635 addStructureTransitionCheck(
2636 putByIdStatus.oldStructure()->storedPrototype().asCell());
2639 for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
2640 JSValue prototype = (*it)->storedPrototype();
2641 if (prototype.isNull())
2642 continue;
2643 ASSERT(prototype.isCell());
2644 addStructureTransitionCheck(prototype.asCell());
2647 ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
2649 NodeIndex propertyStorage;
2650 StructureTransitionData* transitionData =
2651 m_graph.addStructureTransitionData(
2652 StructureTransitionData(
2653 putByIdStatus.oldStructure(),
2654 putByIdStatus.newStructure()));
2656 if (putByIdStatus.oldStructure()->outOfLineCapacity()
2657 != putByIdStatus.newStructure()->outOfLineCapacity()) {
2659 // If we're growing the property storage then it must be because we're
2660 // storing into the out-of-line storage.
2661 ASSERT(!isInlineOffset(putByIdStatus.offset()));
2663 if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
2664 propertyStorage = addToGraph(
2665 AllocatePropertyStorage, OpInfo(transitionData), base);
2667 propertyStorage = addToGraph(
2668 ReallocatePropertyStorage, OpInfo(transitionData),
2669 base, addToGraph(GetButterfly, base));
2672 if (isInlineOffset(putByIdStatus.offset()))
2673 propertyStorage = base;
2675 propertyStorage = addToGraph(GetButterfly, base);
2678 addToGraph(PutStructure, OpInfo(transitionData), base);
2682 OpInfo(m_graph.m_storageAccessData.size()),
2687 StorageAccessData storageAccessData;
2688 storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
2689 storageAccessData.identifierNumber = identifierNumber;
2690 m_graph.m_storageAccessData.append(storageAccessData);
2693 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2695 addToGraph(PutById, OpInfo(identifierNumber), base, value);
2698 NEXT_OPCODE(op_put_by_id);
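// The put_by_id lowering above has three tiers: (1) a simple replace becomes
// CheckStructure + PutByOffset into inline or butterfly storage; (2) a simple
// transition additionally checks the prototype chain, (re)allocates the
// out-of-line storage if its capacity grows, and plants PutStructure before
// the store; (3) everything else stays a generic PutById / PutByIdDirect.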
2701 case op_init_global_const_nop: {
2702 NEXT_OPCODE(op_init_global_const_nop);
2705 case op_init_global_const: {
2706 NodeIndex value = get(currentInstruction[2].u.operand);
2709 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2711 NEXT_OPCODE(op_init_global_const);
2714 case op_init_global_const_check: {
2715 NodeIndex value = get(currentInstruction[2].u.operand);
2716 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
2717 JSGlobalObject* globalObject = codeBlock->globalObject();
2718 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
2719 Identifier identifier = m_codeBlock->identifier(identifierNumber);
2720 SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
2721 if (!entry.couldBeWatched()) {
2724 OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2726 NEXT_OPCODE(op_init_global_const_check);
2730 OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2731 OpInfo(identifierNumber),
2733 NEXT_OPCODE(op_init_global_const_check);
2737 // === Block terminators. ===
2740 unsigned relativeOffset = currentInstruction[1].u.operand;
2741 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2742 LAST_OPCODE(op_jmp);
2746 unsigned relativeOffset = currentInstruction[1].u.operand;
2747 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2748 LAST_OPCODE(op_loop);
2752 unsigned relativeOffset = currentInstruction[2].u.operand;
2753 NodeIndex condition = get(currentInstruction[1].u.operand);
2754 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
2755 LAST_OPCODE(op_jtrue);
2759 unsigned relativeOffset = currentInstruction[2].u.operand;
2760 NodeIndex condition = get(currentInstruction[1].u.operand);
2761 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
2762 LAST_OPCODE(op_jfalse);
2765 case op_loop_if_true: {
2766 unsigned relativeOffset = currentInstruction[2].u.operand;
2767 NodeIndex condition = get(currentInstruction[1].u.operand);
2768 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
2769 LAST_OPCODE(op_loop_if_true);
2772 case op_loop_if_false: {
2773 unsigned relativeOffset = currentInstruction[2].u.operand;
2774 NodeIndex condition = get(currentInstruction[1].u.operand);
2775 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
2776 LAST_OPCODE(op_loop_if_false);
2780 unsigned relativeOffset = currentInstruction[2].u.operand;
2781 NodeIndex value = get(currentInstruction[1].u.operand);
2782 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2783 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
2784 LAST_OPCODE(op_jeq_null);
2787 case op_jneq_null: {
2788 unsigned relativeOffset = currentInstruction[2].u.operand;
2789 NodeIndex value = get(currentInstruction[1].u.operand);
2790 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2791 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
2792 LAST_OPCODE(op_jneq_null);
2796 unsigned relativeOffset = currentInstruction[3].u.operand;
2797 NodeIndex op1 = get(currentInstruction[1].u.operand);
2798 NodeIndex op2 = get(currentInstruction[2].u.operand);
2799 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2800 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2801 LAST_OPCODE(op_jless);
2805 unsigned relativeOffset = currentInstruction[3].u.operand;
2806 NodeIndex op1 = get(currentInstruction[1].u.operand);
2807 NodeIndex op2 = get(currentInstruction[2].u.operand);
2808 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2809 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2810 LAST_OPCODE(op_jlesseq);
2814 unsigned relativeOffset = currentInstruction[3].u.operand;
2815 NodeIndex op1 = get(currentInstruction[1].u.operand);
2816 NodeIndex op2 = get(currentInstruction[2].u.operand);
2817 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2818 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2819 LAST_OPCODE(op_jgreater);
2822 case op_jgreatereq: {
2823 unsigned relativeOffset = currentInstruction[3].u.operand;
2824 NodeIndex op1 = get(currentInstruction[1].u.operand);
2825 NodeIndex op2 = get(currentInstruction[2].u.operand);
2826 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2827 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2828 LAST_OPCODE(op_jgreatereq);
2832 unsigned relativeOffset = currentInstruction[3].u.operand;
2833 NodeIndex op1 = get(currentInstruction[1].u.operand);
2834 NodeIndex op2 = get(currentInstruction[2].u.operand);
2835 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2836 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2837 LAST_OPCODE(op_jnless);
2841 unsigned relativeOffset = currentInstruction[3].u.operand;
2842 NodeIndex op1 = get(currentInstruction[1].u.operand);
2843 NodeIndex op2 = get(currentInstruction[2].u.operand);
2844 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2845 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2846 LAST_OPCODE(op_jnlesseq);
2849 case op_jngreater: {
2850 unsigned relativeOffset = currentInstruction[3].u.operand;
2851 NodeIndex op1 = get(currentInstruction[1].u.operand);
2852 NodeIndex op2 = get(currentInstruction[2].u.operand);
2853 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2854 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2855 LAST_OPCODE(op_jngreater);
2858 case op_jngreatereq: {
2859 unsigned relativeOffset = currentInstruction[3].u.operand;
2860 NodeIndex op1 = get(currentInstruction[1].u.operand);
2861 NodeIndex op2 = get(currentInstruction[2].u.operand);
2862 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2863 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2864 LAST_OPCODE(op_jngreatereq);
2867 case op_loop_if_less: {
2868 unsigned relativeOffset = currentInstruction[3].u.operand;
2869 NodeIndex op1 = get(currentInstruction[1].u.operand);
2870 NodeIndex op2 = get(currentInstruction[2].u.operand);
2871 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2872 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2873 LAST_OPCODE(op_loop_if_less);
2876 case op_loop_if_lesseq: {
2877 unsigned relativeOffset = currentInstruction[3].u.operand;
2878 NodeIndex op1 = get(currentInstruction[1].u.operand);
2879 NodeIndex op2 = get(currentInstruction[2].u.operand);
2880 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2881 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2882 LAST_OPCODE(op_loop_if_lesseq);
2885 case op_loop_if_greater: {
2886 unsigned relativeOffset = currentInstruction[3].u.operand;
2887 NodeIndex op1 = get(currentInstruction[1].u.operand);
2888 NodeIndex op2 = get(currentInstruction[2].u.operand);
2889 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2890 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2891 LAST_OPCODE(op_loop_if_greater);
2894 case op_loop_if_greatereq: {
2895 unsigned relativeOffset = currentInstruction[3].u.operand;
2896 NodeIndex op1 = get(currentInstruction[1].u.operand);
2897 NodeIndex op2 = get(currentInstruction[2].u.operand);
2898 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2899 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2900 LAST_OPCODE(op_loop_if_greatereq);
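// All of the fused compare-and-branch opcodes above follow the same pattern:
// a compare node feeds a Branch whose OpInfo pair is (taken, notTaken)
// bytecode offsets. The positive forms (jtrue, jless, loop_if_*) put the jump
// target in the taken slot, while the negated forms (jfalse, jnless, ...) swap
// the two.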
2904 flushArgumentsAndCapturedVariables();
2905 if (m_inlineStackTop->m_inlineCallFrame) {
2906 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2907 setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2908 m_inlineStackTop->m_didReturn = true;
2909 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2910 // If we're returning from the first block, then we're done parsing.
2911 ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2912 shouldContinueParsing = false;
2913 LAST_OPCODE(op_ret);
2915 // If inlining created blocks, and we're doing a return, then we need some
2916 // special linking.
2917 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2918 m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2920 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2921 ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2922 addToGraph(Jump, OpInfo(NoBlock));
2923 m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2924 m_inlineStackTop->m_didEarlyReturn = true;
2926 LAST_OPCODE(op_ret);
2928 addToGraph(Return, get(currentInstruction[1].u.operand));
2929 LAST_OPCODE(op_ret);
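// op_ret in an inlined frame never emits a Return node: the value is copied
// into the caller's return register, and either parsing simply stops (return
// from the callee's last block) or a Jump to NoBlock is planted and linked up
// later by the early-return fixup in the inlining epilogue (see the
// linkBlocks / m_needsEarlyReturnLinking code near the top of this section).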
2932 flushArgumentsAndCapturedVariables();
2933 ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2934 addToGraph(Return, get(currentInstruction[1].u.operand));
2935 LAST_OPCODE(op_end);
2938 flushArgumentsAndCapturedVariables();
2939 addToGraph(Throw, get(currentInstruction[1].u.operand));
2940 LAST_OPCODE(op_throw);
2942 case op_throw_static_error:
2943 flushArgumentsAndCapturedVariables();
2944 addToGraph(ThrowReferenceError);
2945 LAST_OPCODE(op_throw_static_error);
2948 handleCall(interpreter, currentInstruction, Call, CodeForCall);
2949 NEXT_OPCODE(op_call);
2952 handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2953 NEXT_OPCODE(op_construct);
2955 case op_call_varargs: {
2956 ASSERT(m_inlineStackTop->m_inlineCallFrame);
2957 ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
2958 ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
2959 // It would be cool to funnel this into handleCall() so that it can handle
2960 // inlining. But currently that won't be profitable anyway, since none of the
2961 // uses of call_varargs will be inlineable. So we set this up manually and
2962 // without inline/intrinsic detection.
2964 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
2966 SpeculatedType prediction = SpecNone;
2967 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
2968 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
2969 prediction = getPrediction();
2972 addToGraph(CheckArgumentsNotCreated);
2974 unsigned argCount = m_inlineStackTop->m_inlineCallFrame->arguments.size();
2975 if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
2976 m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
2978 addVarArgChild(get(currentInstruction[1].u.operand)); // callee
2979 addVarArgChild(get(currentInstruction[2].u.operand)); // this
2980 for (unsigned argument = 1; argument < argCount; ++argument)
2981 addVarArgChild(get(argumentToOperand(argument)));
2983 NodeIndex call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
2984 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
2985 set(putInstruction[1].u.operand, call);
2987 NEXT_OPCODE(op_call_varargs);
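// This op_call_varargs path is only taken for inlined code whose arguments
// object provably has not been created or modified (hence the asserts and the
// CheckArgumentsNotCreated above), so the varargs call can be expanded here
// into an ordinary Call node over the inlined frame's statically known
// argument count.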
2990 case op_call_put_result:
2991 NEXT_OPCODE(op_call_put_result);
2994 // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2995 // support simmer for a while before making it more general, since it's
2996 // already gnarly enough as it is.
2997 ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
3000 OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
3001 get(currentInstruction[1].u.operand));
3002 addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
3003 LAST_OPCODE(op_jneq_ptr);
3006 case op_resolve_global_property:
3007 case op_resolve_global_var:
3008 case op_resolve_scoped_var:
3009 case op_resolve_scoped_var_on_top_scope:
3010 case op_resolve_scoped_var_with_top_scope_check: {
3011 SpeculatedType prediction = getPrediction();
3013 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3014 unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[3].u.operand];
3015 NodeIndex value = 0;
3016 if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
3017 set(currentInstruction[1].u.operand, value);
3018 NEXT_OPCODE(op_resolve);
3021 NodeIndex resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
3022 m_graph.m_resolveOperationsData.append(ResolveOperationData());
3023 ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
3024 data.identifierNumber = identifier;
3025 data.resolveOperationsIndex = operations;
3027 set(currentInstruction[1].u.operand, resolve);
3029 NEXT_OPCODE(op_resolve);
3032 case op_put_to_base_variable:
3033 case op_put_to_base: {
3034 unsigned base = currentInstruction[1].u.operand;
3035 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3036 unsigned value = currentInstruction[3].u.operand;
3037 unsigned operation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[4].u.operand];
3038 PutToBaseOperation* putToBase = m_codeBlock->putToBaseOperation(operation);
3040 if (putToBase->m_isDynamic) {
3041 addToGraph(Phantom, get(base));
3042 addToGraph(PutById, OpInfo(identifier), get(base), get(value));
3043 NEXT_OPCODE(op_put_to_base);
3046 switch (putToBase->m_kind) {
3047 case PutToBaseOperation::Uninitialised:
3048 addToGraph(Phantom, get(base));
3049 addToGraph(ForceOSRExit);
3052 case PutToBaseOperation::GlobalVariablePutChecked: {
3053 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
3054 JSGlobalObject* globalObject = codeBlock->globalObject();
3055 SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
3056 if (entry.couldBeWatched()) {
3057 addToGraph(PutGlobalVarCheck,
3058 OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
3064 case PutToBaseOperation::GlobalVariablePut:
3065 addToGraph(PutGlobalVar,
3066 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
3069 case PutToBaseOperation::VariablePut: {
3070 NodeIndex scope = get(base);
3071 NodeIndex scopeRegisters = addToGraph(GetScopeRegisters, scope);
3072 addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), scope, scopeRegisters, get(value));
3075 case PutToBaseOperation::GlobalPropertyPut: {
3076 if (!putToBase->m_structure) {
3077 addToGraph(Phantom, get(base));
3078 addToGraph(ForceOSRExit);
3079 NEXT_OPCODE(op_put_to_base);
3081 NodeIndex baseNode = get(base);
3082 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
3083 NodeIndex propertyStorage;
3084 if (isInlineOffset(putToBase->m_offset))
3085 propertyStorage = baseNode;
3087 propertyStorage = addToGraph(GetButterfly, baseNode);
3088 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));
3090 StorageAccessData storageAccessData;
3091 storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
3092 storageAccessData.identifierNumber = identifier;
3093 m_graph.m_storageAccessData.append(storageAccessData);