/*
 * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGByteCodeParser.h"

#include "ArrayConstructor.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "DFGArrayMode.h"
#include "DFGByteCodeCache.h"
#include "DFGCapabilities.h"
#include "GetByIdStatus.h"
#include "PutByIdStatus.h"
#include "ResolveGlobalStatus.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
namespace JSC { namespace DFG {
class ConstantBufferKey {
public:
    ConstantBufferKey() : m_codeBlock(0), m_index(0) { }
    ConstantBufferKey(WTF::HashTableDeletedValueType) : m_codeBlock(0), m_index(1) { }
    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
        : m_codeBlock(codeBlock)
        , m_index(index)
    {
    }
    
    bool operator==(const ConstantBufferKey& other) const
    {
        return m_codeBlock == other.m_codeBlock
            && m_index == other.m_index;
    }
    
    unsigned hash() const { return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index; }
    
    bool isHashTableDeletedValue() const
    {
        return !m_codeBlock && m_index;
    }
    
    CodeBlock* codeBlock() const { return m_codeBlock; }
    unsigned index() const { return m_index; }
    
private:
    CodeBlock* m_codeBlock;
    unsigned m_index;
};
struct ConstantBufferKeyHash {
    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
    {
        return a == b;
    }
    
    static const bool safeToCompareToEmptyOrDeleted = true;
};

} } // namespace JSC::DFG
namespace WTF {

template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
    typedef JSC::DFG::ConstantBufferKeyHash Hash;
};

template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };

} // namespace WTF
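
// With the DefaultHash and HashTraits specializations above, ConstantBufferKey
// can be used directly as a WTF::HashMap key. A minimal sketch of the intended
// use (illustrative only; the variable names are hypothetical, but this mirrors
// how m_constantBufferCache is used further down):
//
//     WTF::HashMap<JSC::DFG::ConstantBufferKey, unsigned> cache;
//     cache.add(JSC::DFG::ConstantBufferKey(codeBlock, bufferIndex), remappedIndex);
//     unsigned found = cache.get(JSC::DFG::ConstantBufferKey(codeBlock, bufferIndex));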
namespace JSC { namespace DFG {
// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
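//
// A rough sketch of how the parser is typically driven (illustrative only; the
// real call site lives outside this class):
//
//     ByteCodeParser parser(exec, graph);
//     bool parsedCompletely = parser.parse();
//
// parse() walks the profiled bytecode instruction by instruction, emitting DFG
// nodes into basic blocks; the blocks are linked to their successors once
// parsing is complete.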
class ByteCodeParser {
public:
ByteCodeParser(ExecState* exec, Graph& graph)
    : m_exec(exec)
    , m_globalData(&graph.m_globalData)
    , m_codeBlock(graph.m_codeBlock)
    , m_profiledBlock(graph.m_profiledBlock)
    , m_graph(graph)
    , m_currentBlock(0)
    , m_currentIndex(0)
    , m_currentProfilingIndex(0)
    , m_constantUndefined(UINT_MAX)
    , m_constantNull(UINT_MAX)
    , m_constantNaN(UINT_MAX)
    , m_constant1(UINT_MAX)
    , m_constants(m_codeBlock->numberOfConstantRegisters())
    , m_numArguments(m_codeBlock->numParameters())
    , m_numLocals(m_codeBlock->m_numCalleeRegisters)
    , m_preservedVars(m_codeBlock->m_numVars)
    , m_parameterSlots(0)
    , m_numPassedVarArgs(0)
    , m_globalResolveNumber(0)
    , m_inlineStackTop(0)
    , m_haveBuiltOperandMaps(false)
    , m_emptyJSValueIndex(UINT_MAX)
    , m_currentInstruction(0)
{
    ASSERT(m_profiledBlock);
    
    for (int i = 0; i < m_codeBlock->m_numVars; ++i)
        m_preservedVars.set(i);
}
// Parse a full CodeBlock of bytecode.
bool parse();

// Just parse from m_currentIndex to the end of the current CodeBlock.
void parseCodeBlock();
158 // Helper for min and max.
159 bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
161 // Handle calls. This resolves issues surrounding inlining and intrinsics.
162 void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
163 void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
164 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
165 bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
166 // Handle setting the result of an intrinsic.
167 void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
168 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
169 bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
170 bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
171 NodeIndex handleGetByOffset(SpeculatedType, NodeIndex base, unsigned identifierNumber, PropertyOffset);
void handleGetByOffset(
    int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
    PropertyOffset);
void handleGetById(
    int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
    const GetByIdStatus&);
179 // Convert a set of ResolveOperations into graph nodes
180 bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value);
182 // Prepare to parse a block.
183 void prepareToParseBlock();
184 // Parse a single basic block of bytecode instructions.
185 bool parseBlock(unsigned limit);
186 // Link block successors.
187 void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
188 void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
189 // Link GetLocal & SetLocal nodes, to ensure live values are generated.
194 template<PhiStackType stackType>
195 void processPhiStack();
197 void fixVariableAccessPredictions();
198 // Add spill locations to nodes.
199 void allocateVirtualRegisters();
201 VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
203 ASSERT(operand < FirstConstantRegisterIndex);
205 m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
206 return &m_graph.m_variableAccessData.last();
209 // Get/Set the operands/result of a bytecode instruction.
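//
// Bytecode operands come from several distinct spaces, and getDirect() below
// dispatches on them (a sketch of the mapping, based on the checks it performs):
//
//     operand >= FirstConstantRegisterIndex  -> constant pool (getJSConstant)
//     operand == JSStack::Callee             -> the callee (getCallee)
//     operandIsArgument(operand)             -> an argument (getArgument)
//     otherwise                              -> a local/temporary (getLocal)
//
// get()/set() additionally remap the operand through the current inline stack
// entry, so code being inlined addresses the machine code block's registers.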
210 NodeIndex getDirect(int operand)
212 // Is this a constant?
213 if (operand >= FirstConstantRegisterIndex) {
214 unsigned constant = operand - FirstConstantRegisterIndex;
215 ASSERT(constant < m_constants.size());
216 return getJSConstant(constant);
219 if (operand == JSStack::Callee)
222 // Is this an argument?
223 if (operandIsArgument(operand))
224 return getArgument(operand);
227 return getLocal((unsigned)operand);
229 NodeIndex get(int operand)
231 return getDirect(m_inlineStackTop->remapOperand(operand));
233 enum SetMode { NormalSet, SetOnEntry };
234 void setDirect(int operand, NodeIndex value, SetMode setMode = NormalSet)
236 // Is this an argument?
237 if (operandIsArgument(operand)) {
238 setArgument(operand, value, setMode);
243 setLocal((unsigned)operand, value, setMode);
245 void set(int operand, NodeIndex value, SetMode setMode = NormalSet)
247 setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
250 NodeIndex injectLazyOperandSpeculation(NodeIndex nodeIndex)
252 Node& node = m_graph[nodeIndex];
253 ASSERT(node.op() == GetLocal);
254 ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
255 SpeculatedType prediction =
256 m_inlineStackTop->m_lazyOperands.prediction(
257 LazyOperandValueProfileKey(m_currentIndex, node.local()));
258 #if DFG_ENABLE(DEBUG_VERBOSE)
259 dataLogF("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
260 nodeIndex, m_currentIndex, node.local(), speculationToString(prediction));
262 node.variableAccessData()->predict(prediction);
266 // Used in implementing get/set, above, where the operand is a local variable.
267 NodeIndex getLocal(unsigned operand)
269 NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);
270 bool isCaptured = m_codeBlock->isCaptured(operand, m_inlineStackTop->m_inlineCallFrame);
272 if (nodeIndex != NoNode) {
273 Node* nodePtr = &m_graph[nodeIndex];
274 if (nodePtr->op() == Flush) {
275 // Two possibilities: either the block wants the local to be live
276 // but has not loaded its value, or it has loaded its value, in
277 // which case we're done.
278 nodeIndex = nodePtr->child1().index();
279 Node& flushChild = m_graph[nodeIndex];
280 if (flushChild.op() == Phi) {
281 VariableAccessData* variableAccessData = flushChild.variableAccessData();
282 variableAccessData->mergeIsCaptured(isCaptured);
283 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
284 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
287 nodePtr = &flushChild;
290 ASSERT(&m_graph[nodeIndex] == nodePtr);
291 ASSERT(nodePtr->op() != Flush);
293 nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
296 // We wish to use the same variable access data as the previous access,
297 // but for all other purposes we want to issue a load since for all we
298 // know, at this stage of compilation, the local has been clobbered.
300 // Make sure we link to the Phi node, not to the GetLocal.
301 if (nodePtr->op() == GetLocal)
302 nodeIndex = nodePtr->child1().index();
304 NodeIndex newGetLocal = injectLazyOperandSpeculation(
305 addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
306 m_currentBlock->variablesAtTail.local(operand) = newGetLocal;
310 if (nodePtr->op() == GetLocal)
312 ASSERT(nodePtr->op() == SetLocal);
313 return nodePtr->child1().index();
316 // Check for reads of temporaries from prior blocks,
317 // expand m_preservedVars to cover these.
318 m_preservedVars.set(operand);
320 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
322 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
323 m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
324 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
325 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
327 m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
331 void setLocal(unsigned operand, NodeIndex value, SetMode setMode = NormalSet)
333 bool isCaptured = m_codeBlock->isCaptured(operand, m_inlineStackTop->m_inlineCallFrame);
335 if (setMode == NormalSet) {
336 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
337 if (isCaptured || argumentPosition)
338 flushDirect(operand, argumentPosition);
341 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
342 variableAccessData->mergeStructureCheckHoistingFailed(
343 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
344 NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
345 m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
348 // Used in implementing get/set, above, where the operand is an argument.
349 NodeIndex getArgument(unsigned operand)
351 unsigned argument = operandToArgument(operand);
352 ASSERT(argument < m_numArguments);
354 NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);
355 bool isCaptured = m_codeBlock->isCaptured(operand);
357 if (nodeIndex != NoNode) {
358 Node* nodePtr = &m_graph[nodeIndex];
359 if (nodePtr->op() == Flush) {
360 // Two possibilities: either the block wants the local to be live
361 // but has not loaded its value, or it has loaded its value, in
362 // which case we're done.
363 nodeIndex = nodePtr->child1().index();
364 Node& flushChild = m_graph[nodeIndex];
365 if (flushChild.op() == Phi) {
366 VariableAccessData* variableAccessData = flushChild.variableAccessData();
367 variableAccessData->mergeIsCaptured(isCaptured);
368 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
369 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
372 nodePtr = &flushChild;
375 ASSERT(&m_graph[nodeIndex] == nodePtr);
376 ASSERT(nodePtr->op() != Flush);
378 nodePtr->variableAccessData()->mergeIsCaptured(isCaptured);
380 if (nodePtr->op() == SetArgument) {
381 // We're getting an argument in the first basic block; link
382 // the GetLocal to the SetArgument.
383 ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
384 VariableAccessData* variable = nodePtr->variableAccessData();
385 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable), nodeIndex));
386 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
391 if (nodePtr->op() == GetLocal)
392 nodeIndex = nodePtr->child1().index();
393 return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
396 if (nodePtr->op() == GetLocal)
399 ASSERT(nodePtr->op() == SetLocal);
400 return nodePtr->child1().index();
403 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
405 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
406 m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
407 nodeIndex = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
408 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
410 m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
414 void setArgument(int operand, NodeIndex value, SetMode setMode = NormalSet)
416 unsigned argument = operandToArgument(operand);
417 ASSERT(argument < m_numArguments);
419 bool isCaptured = m_codeBlock->isCaptured(operand);
421 // Always flush arguments, except for 'this'.
422 if (argument && setMode == NormalSet)
423 flushDirect(operand);
425 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
426 variableAccessData->mergeStructureCheckHoistingFailed(
427 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
428 NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
429 m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
432 ArgumentPosition* findArgumentPositionForArgument(int argument)
434 InlineStackEntry* stack = m_inlineStackTop;
435 while (stack->m_inlineCallFrame)
436 stack = stack->m_caller;
437 return stack->m_argumentPositions[argument];
440 ArgumentPosition* findArgumentPositionForLocal(int operand)
442 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
443 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
444 if (!inlineCallFrame)
446 if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
448 if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
450 if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
452 int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
453 return stack->m_argumentPositions[argument];
458 ArgumentPosition* findArgumentPosition(int operand)
460 if (operandIsArgument(operand))
461 return findArgumentPositionForArgument(operandToArgument(operand));
462 return findArgumentPositionForLocal(operand);
465 void flush(int operand)
467 flushDirect(m_inlineStackTop->remapOperand(operand));
470 void flushDirect(int operand)
472 flushDirect(operand, findArgumentPosition(operand));
475 void flushDirect(int operand, ArgumentPosition* argumentPosition)
477 // FIXME: This should check if the same operand had already been flushed to
478 // some other local variable.
480 bool isCaptured = m_codeBlock->isCaptured(operand, m_inlineStackTop->m_inlineCallFrame);
482 ASSERT(operand < FirstConstantRegisterIndex);
486 if (operandIsArgument(operand)) {
487 index = operandToArgument(operand);
488 nodeIndex = m_currentBlock->variablesAtTail.argument(index);
491 nodeIndex = m_currentBlock->variablesAtTail.local(index);
492 m_preservedVars.set(operand);
495 if (nodeIndex != NoNode) {
496 Node& node = m_graph[nodeIndex];
499 nodeIndex = node.child1().index();
502 nodeIndex = node.child1().index();
508 ASSERT(m_graph[nodeIndex].op() != Flush
509 && m_graph[nodeIndex].op() != GetLocal);
511 // Emit a Flush regardless of whether we already flushed it.
512 // This gives us guidance to see that the variable also needs to be flushed
513 // for arguments, even if it already had to be flushed for other reasons.
514 VariableAccessData* variableAccessData = node.variableAccessData();
515 variableAccessData->mergeIsCaptured(isCaptured);
516 addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
517 if (argumentPosition)
518 argumentPosition->addVariable(variableAccessData);
522 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
523 NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
524 nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
525 if (operandIsArgument(operand)) {
526 m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
527 m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
528 m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
530 m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
531 m_currentBlock->variablesAtTail.local(index) = nodeIndex;
532 m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
534 if (argumentPosition)
535 argumentPosition->addVariable(variableAccessData);
538 void flushArgumentsAndCapturedVariables()
541 if (m_inlineStackTop->m_inlineCallFrame)
542 numArguments = m_inlineStackTop->m_inlineCallFrame->arguments.size();
544 numArguments = m_inlineStackTop->m_codeBlock->numParameters();
545 for (unsigned argument = numArguments; argument-- > 1;)
546 flush(argumentToOperand(argument));
547 for (int local = 0; local < m_inlineStackTop->m_codeBlock->m_numVars; ++local) {
548 if (!m_inlineStackTop->m_codeBlock->isCaptured(local))
554 // Get an operand, and perform a ToInt32/ToNumber conversion on it.
555 NodeIndex getToInt32(int operand)
557 return toInt32(get(operand));
560 // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
561 NodeIndex toInt32(NodeIndex index)
563 Node& node = m_graph[index];
565 if (node.hasInt32Result())
568 if (node.op() == UInt32ToNumber)
569 return node.child1().index();
571 // Check for numeric constants boxed as JSValues.
572 if (node.op() == JSConstant) {
573 JSValue v = valueOfJSConstant(index);
575 return getJSConstant(node.constantNumber());
577 return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
580 return addToGraph(ValueToInt32, index);
583 NodeIndex getJSConstantForValue(JSValue constantValue)
585 unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
586 if (constantIndex >= m_constants.size())
587 m_constants.append(ConstantRecord());
589 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
591 return getJSConstant(constantIndex);
594 NodeIndex getJSConstant(unsigned constant)
596 NodeIndex index = m_constants[constant].asJSValue;
600 NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
601 m_constants[constant].asJSValue = resultIndex;
605 NodeIndex getCallee()
607 return addToGraph(GetCallee);
610 // Helper functions to get/set the this value.
613 return get(m_inlineStackTop->m_codeBlock->thisRegister());
615 void setThis(NodeIndex value)
617 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
620 // Convenience methods for checking nodes for constants.
621 bool isJSConstant(NodeIndex index)
623 return m_graph[index].op() == JSConstant;
625 bool isInt32Constant(NodeIndex nodeIndex)
627 return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
629 // Convenience methods for getting constant values.
630 JSValue valueOfJSConstant(NodeIndex index)
632 ASSERT(isJSConstant(index));
633 return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
635 int32_t valueOfInt32Constant(NodeIndex nodeIndex)
637 ASSERT(isInt32Constant(nodeIndex));
638 return valueOfJSConstant(nodeIndex).asInt32();
641 // This method returns a JSConstant with the value 'undefined'.
642 NodeIndex constantUndefined()
644 // Has m_constantUndefined been set up yet?
645 if (m_constantUndefined == UINT_MAX) {
646 // Search the constant pool for undefined, if we find it, we can just reuse this!
647 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
648 for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
649 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
650 if (testMe.isUndefined())
651 return getJSConstant(m_constantUndefined);
654 // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
655 ASSERT(m_constants.size() == numberOfConstants);
656 m_codeBlock->addConstant(jsUndefined());
657 m_constants.append(ConstantRecord());
658 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
661 // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
662 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
663 return getJSConstant(m_constantUndefined);
666 // This method returns a JSConstant with the value 'null'.
667 NodeIndex constantNull()
669 // Has m_constantNull been set up yet?
670 if (m_constantNull == UINT_MAX) {
671 // Search the constant pool for null, if we find it, we can just reuse this!
672 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
673 for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
674 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
676 return getJSConstant(m_constantNull);
679 // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
680 ASSERT(m_constants.size() == numberOfConstants);
681 m_codeBlock->addConstant(jsNull());
682 m_constants.append(ConstantRecord());
683 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
686 // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
687 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
688 return getJSConstant(m_constantNull);
691 // This method returns a DoubleConstant with the value 1.
694 // Has m_constant1 been set up yet?
695 if (m_constant1 == UINT_MAX) {
696 // Search the constant pool for the value 1, if we find it, we can just reuse this!
697 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
698 for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
699 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
700 if (testMe.isInt32() && testMe.asInt32() == 1)
701 return getJSConstant(m_constant1);
704 // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
705 ASSERT(m_constants.size() == numberOfConstants);
706 m_codeBlock->addConstant(jsNumber(1));
707 m_constants.append(ConstantRecord());
708 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
711 // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
712 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
713 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
714 return getJSConstant(m_constant1);
717 // This method returns a DoubleConstant with the value NaN.
718 NodeIndex constantNaN()
720 JSValue nan = jsNaN();
722 // Has m_constantNaN been set up yet?
723 if (m_constantNaN == UINT_MAX) {
724 // Search the constant pool for the value NaN, if we find it, we can just reuse this!
725 unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
726 for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
727 JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
728 if (JSValue::encode(testMe) == JSValue::encode(nan))
729 return getJSConstant(m_constantNaN);
732 // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
733 ASSERT(m_constants.size() == numberOfConstants);
734 m_codeBlock->addConstant(nan);
735 m_constants.append(ConstantRecord());
736 ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
739 // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
740 ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
741 ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
742 return getJSConstant(m_constantNaN);
745 NodeIndex cellConstant(JSCell* cell)
747 HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
748 if (result.isNewEntry)
749 result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
751 return result.iterator->value;
754 CodeOrigin currentCodeOrigin()
756 return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
759 // These methods create a node and add it to the graph. If nodes of this type are
760 // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
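//
// A typical use from a bytecode handler looks roughly like this (illustrative
// only; the operand names are hypothetical):
//
//     NodeIndex value = addToGraph(ArithAdd, get(leftOperand), get(rightOperand));
//     set(destinationOperand, value);
//
// The OpInfo overloads are for nodes that carry immediate metadata (a constant
// index, a structure set, a speculated type, ...) in addition to their children.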
761 NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
763 NodeIndex resultIndex = (NodeIndex)m_graph.size();
764 m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
766 m_currentBlock->append(resultIndex);
768 if (defaultFlags(op) & NodeMustGenerate)
769 m_graph.ref(resultIndex);
772 NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
774 NodeIndex resultIndex = (NodeIndex)m_graph.size();
775 m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
777 m_currentBlock->phis.append(resultIndex);
779 m_currentBlock->append(resultIndex);
781 if (defaultFlags(op) & NodeMustGenerate)
782 m_graph.ref(resultIndex);
785 NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
787 NodeIndex resultIndex = (NodeIndex)m_graph.size();
788 m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
790 m_currentBlock->append(resultIndex);
792 if (defaultFlags(op) & NodeMustGenerate)
793 m_graph.ref(resultIndex);
797 NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
799 NodeIndex resultIndex = (NodeIndex)m_graph.size();
800 m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
802 m_currentBlock->append(resultIndex);
804 m_numPassedVarArgs = 0;
806 if (defaultFlags(op) & NodeMustGenerate)
807 m_graph.ref(resultIndex);
811 NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
813 NodeIndex resultIndex = (NodeIndex)m_graph.size();
814 m_graph.append(Node(Phi, currentCodeOrigin(), info));
815 block->phis.append(resultIndex);
820 void addVarArgChild(NodeIndex child)
822 m_graph.m_varArgChildren.append(Edge(child));
823 m_numPassedVarArgs++;
826 NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
828 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
830 SpeculatedType prediction = SpecNone;
831 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
832 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
833 prediction = getPrediction();
836 addVarArgChild(get(currentInstruction[1].u.operand));
837 int argCount = currentInstruction[2].u.operand;
838 if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
839 m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
841 int registerOffset = currentInstruction[3].u.operand;
842 int dummyThisArgument = op == Call ? 0 : 1;
843 for (int i = 0 + dummyThisArgument; i < argCount; ++i)
844 addVarArgChild(get(registerOffset + argumentToOperand(i)));
846 NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
847 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
848 set(putInstruction[1].u.operand, call);
852 NodeIndex addStructureTransitionCheck(JSCell* object, Structure* structure)
854 // Add a weak JS constant for the object regardless, since the code should
855 // be jettisoned if the object ever dies.
856 NodeIndex objectIndex = cellConstant(object);
858 if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
859 addToGraph(StructureTransitionWatchpoint, OpInfo(structure), objectIndex);
863 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectIndex);
868 NodeIndex addStructureTransitionCheck(JSCell* object)
870 return addStructureTransitionCheck(object, object->structure());
873 SpeculatedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
875 UNUSED_PARAM(nodeIndex);
877 SpeculatedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
878 #if DFG_ENABLE(DEBUG_VERBOSE)
879 dataLogF("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, speculationToString(prediction));
885 SpeculatedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
887 SpeculatedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);
889 if (prediction == SpecNone) {
890 // We have no information about what values this node generates. Give up
891 // on executing this code, since we're likely to do more damage than good.
892 addToGraph(ForceOSRExit);
898 SpeculatedType getPredictionWithoutOSRExit()
900 return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
903 SpeculatedType getPrediction()
905 return getPrediction(m_graph.size(), m_currentProfilingIndex);
908 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
910 profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
911 return ArrayMode::fromObserved(profile, action, false);
914 ArrayMode getArrayMode(ArrayProfile* profile)
916 return getArrayMode(profile, Array::Read);
919 ArrayMode getArrayModeAndEmitChecks(ArrayProfile* profile, Array::Action action, NodeIndex base)
921 profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
923 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
924 if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles())
925 dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
926 dataLogF("Array profile for bc#%u: %p%s%s, %u\n", m_currentIndex, profile->expectedStructure(), profile->structureIsPolymorphic() ? " (polymorphic)" : "", profile->mayInterceptIndexedAccesses() ? " (may intercept)" : "", profile->observedArrayModes());
930 m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
931 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, OutOfBounds);
933 ArrayMode result = ArrayMode::fromObserved(profile, action, makeSafe);
935 if (profile->hasDefiniteStructure() && result.benefitsFromStructureCheck())
936 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(profile->expectedStructure())), base);
941 NodeIndex makeSafe(NodeIndex nodeIndex)
943 Node& node = m_graph[nodeIndex];
945 bool likelyToTakeSlowCase;
946 if (!isX86() && node.op() == ArithMod)
947 likelyToTakeSlowCase = false;
949 likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
951 if (!likelyToTakeSlowCase
952 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
953 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
956 switch (m_graph[nodeIndex].op()) {
962 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
963 m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
967 if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
968 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
969 #if DFG_ENABLE(DEBUG_VERBOSE)
970 dataLogF("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
972 m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
973 } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
974 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
975 #if DFG_ENABLE(DEBUG_VERBOSE)
976 dataLogF("Making ArithMul @%u take faster slow case.\n", nodeIndex);
978 m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
983 ASSERT_NOT_REACHED();
990 NodeIndex makeDivSafe(NodeIndex nodeIndex)
992 ASSERT(m_graph[nodeIndex].op() == ArithDiv);
994 // The main slow case counter for op_div in the old JIT counts only when
995 // the operands are not numbers. We don't care about that since we already
996 // have speculations in place that take care of that separately. We only
997 // care about when the outcome of the division is not an integer, which
998 // is what the special fast case counter tells us.
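//
// For example (given the profiling scheme described above), repeatedly
// executing `1 / 3` bumps the special fast case counter because the quotient
// is not an integer; couldTakeSpecialFastCase() then reports true and we
// conservatively flag this ArithDiv below.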
1000 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
1001 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
1002 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1005 #if DFG_ENABLE(DEBUG_VERBOSE)
1006 dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
1009 // FIXME: It might be possible to make this more granular. The DFG certainly can
1010 // distinguish between negative zero and overflow in its exit profiles.
1011 m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
1016 bool willNeedFlush(StructureStubInfo& stubInfo)
1018 PolymorphicAccessStructureList* list;
1020 switch (stubInfo.accessType) {
1021 case access_get_by_id_self_list:
1022 list = stubInfo.u.getByIdSelfList.structureList;
1023 listSize = stubInfo.u.getByIdSelfList.listSize;
1025 case access_get_by_id_proto_list:
1026 list = stubInfo.u.getByIdProtoList.structureList;
1027 listSize = stubInfo.u.getByIdProtoList.listSize;
1032 for (int i = 0; i < listSize; ++i) {
1033 if (!list->list[i].isDirect)
1039 bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
1044 if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
1047 for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
1048 if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
1055 void buildOperandMapsIfNecessary();
ExecState* m_exec;

JSGlobalData* m_globalData;
CodeBlock* m_codeBlock;
CodeBlock* m_profiledBlock;
Graph& m_graph;
1063 // The current block being generated.
1064 BasicBlock* m_currentBlock;
1065 // The bytecode index of the current instruction being generated.
1066 unsigned m_currentIndex;
1067 // The bytecode index of the value profile of the current instruction being generated.
1068 unsigned m_currentProfilingIndex;
1070 // We use these values during code generation, and to avoid the need for
1071 // special handling we make sure they are available as constants in the
1072 // CodeBlock's constant pool. These variables are initialized to
1073 // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
1074 // constant pool, as necessary.
1075 unsigned m_constantUndefined;
1076 unsigned m_constantNull;
1077 unsigned m_constantNaN;
1078 unsigned m_constant1;
1079 HashMap<JSCell*, unsigned> m_cellConstants;
1080 HashMap<JSCell*, NodeIndex> m_cellConstantNodes;
1082 // A constant in the constant pool may be represented by more than one
1083 // node in the graph, depending on the context in which it is being used.
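//
// For example, the same constant-pool entry may be requested once as a boxed
// JSValue (cached in asJSValue) and once as a raw numeric value (cached in
// asNumeric); each flavour gets its own node the first time it is requested,
// and the record remembers the node indices so later uses can be shared.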
struct ConstantRecord {
    ConstantRecord() : asInt32(NoNode), asNumeric(NoNode), asJSValue(NoNode) { }
    
    NodeIndex asInt32;
    NodeIndex asNumeric;
    NodeIndex asJSValue;
};
1097 // Track the index of the node whose result is the current value for every
1098 // register value in the bytecode - argument, local, and temporary.
1099 Vector<ConstantRecord, 16> m_constants;
1101 // The number of arguments passed to the function.
1102 unsigned m_numArguments;
1103 // The number of locals (vars + temporaries) used in the function.
1104 unsigned m_numLocals;
1105 // The set of registers we need to preserve across BasicBlock boundaries;
1106 // typically equal to the set of vars, but we expand this to cover all
// temporaries that persist across blocks (due to ?:, &&, ||, etc).
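// For example, in `x = a || b` the temporary holding the value of `a` must
// survive into the join block that the || introduces; when getLocal() sees such
// a read coming from a prior block it adds the operand to m_preservedVars (see
// the "reads of temporaries from prior blocks" case above).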
1108 BitVector m_preservedVars;
1109 // The number of slots (in units of sizeof(Register)) that we need to
1110 // preallocate for calls emanating from this frame. This includes the
1111 // size of the CallFrame, only if this is not a leaf function. (I.e.
1112 // this is 0 if and only if this function is a leaf.)
1113 unsigned m_parameterSlots;
1114 // The number of var args passed to the next var arg node.
1115 unsigned m_numPassedVarArgs;
1116 // The index in the global resolve info.
1117 unsigned m_globalResolveNumber;
struct PhiStackEntry {
    PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
        : m_block(block), m_phi(phi), m_varNo(varNo) { }
    
    BasicBlock* m_block;
    NodeIndex m_phi;
    unsigned m_varNo;
};
1131 Vector<PhiStackEntry, 16> m_argumentPhiStack;
1132 Vector<PhiStackEntry, 16> m_localPhiStack;
1134 HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
1136 struct InlineStackEntry {
1137 ByteCodeParser* m_byteCodeParser;
1139 CodeBlock* m_codeBlock;
1140 CodeBlock* m_profiledBlock;
1141 InlineCallFrame* m_inlineCallFrame;
1142 VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame
1144 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1146 QueryableExitProfile m_exitProfile;
1148 // Remapping of identifier and constant numbers from the code block being
1149 // inlined (inline callee) to the code block that we're inlining into
// (the machine code block, which is the transitive, though not necessarily
// direct, caller).
1152 Vector<unsigned> m_identifierRemap;
1153 Vector<unsigned> m_constantRemap;
1154 Vector<unsigned> m_constantBufferRemap;
1155 Vector<unsigned> m_resolveOperationRemap;
1156 Vector<unsigned> m_putToBaseOperationRemap;
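
// For example, if the inlined callee's bytecode refers to identifier #3 of its
// own CodeBlock, m_identifierRemap[3] holds the number of the equivalent
// identifier in the machine code block, so nodes emitted while inlining always
// use the outer block's numbering.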
1158 // Blocks introduced by this code block, which need successor linking.
1159 // May include up to one basic block that includes the continuation after
1160 // the callsite in the caller. These must be appended in the order that they
// are created, but their bytecodeBegin values need not be in order as they
// are ignored.
1163 Vector<UnlinkedBlock> m_unlinkedBlocks;
1165 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1166 // cannot have two blocks that have the same bytecodeBegin. For this very
1167 // reason, this is not equivalent to
1168 Vector<BlockIndex> m_blockLinkingTargets;
1170 // If the callsite's basic block was split into two, then this will be
1171 // the head of the callsite block. It needs its successors linked to the
1172 // m_unlinkedBlocks, but not the other way around: there's no way for
1173 // any blocks in m_unlinkedBlocks to jump back into this block.
1174 BlockIndex m_callsiteBlockHead;
1176 // Does the callsite block head need linking? This is typically true
1177 // but will be false for the machine code block's inline stack entry
1178 // (since that one is not inlined) and for cases where an inline callee
1179 // did the linking for us.
1180 bool m_callsiteBlockHeadNeedsLinking;
1182 VirtualRegister m_returnValue;
1184 // Speculations about variable types collected from the profiled code block,
// which are based on OSR exit profiles that past DFG compilations of this
1186 // code block had gathered.
1187 LazyOperandValueProfileParser m_lazyOperands;
1189 // Did we see any returns? We need to handle the (uncommon but necessary)
1190 // case where a procedure that does not return was inlined.
1193 // Did we have any early returns?
1194 bool m_didEarlyReturn;
1196 // Pointers to the argument position trackers for this slice of code.
1197 Vector<ArgumentPosition*> m_argumentPositions;
1199 InlineStackEntry* m_caller;
1204 CodeBlock* profiledBlock,
1205 BlockIndex callsiteBlockHead,
1206 VirtualRegister calleeVR,
1208 VirtualRegister returnValueVR,
1209 VirtualRegister inlineCallFrameStart,
1210 int argumentCountIncludingThis,
1211 CodeSpecializationKind);
1215 m_byteCodeParser->m_inlineStackTop = m_caller;
1218 int remapOperand(int operand) const
1220 if (!m_inlineCallFrame)
1223 if (operand >= FirstConstantRegisterIndex) {
1224 int result = m_constantRemap[operand - FirstConstantRegisterIndex];
1225 ASSERT(result >= FirstConstantRegisterIndex);
1229 if (operand == JSStack::Callee)
1232 return operand + m_inlineCallFrame->stackOffset;
1236 InlineStackEntry* m_inlineStackTop;
// Have we built operand maps? We initialize them lazily, and only when doing
// inlining.
1240 bool m_haveBuiltOperandMaps;
1241 // Mapping between identifier names and numbers.
1242 IdentifierMap m_identifierMap;
1243 // Mapping between values and constant numbers.
1244 JSValueMap m_jsValueMap;
1245 // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
1246 // work-around for the fact that JSValueMap can't handle "empty" values.
1247 unsigned m_emptyJSValueIndex;
1249 // Cache of code blocks that we've generated bytecode for.
1250 ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
1252 Instruction* m_currentInstruction;
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
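
// Every bytecode case in parseBlock() ends with one of these macros. A typical
// case looks roughly like this (illustrative only):
//
//     case op_mov: {
//         NodeIndex op = get(currentInstruction[2].u.operand);
//         set(currentInstruction[1].u.operand, op);
//         NEXT_OPCODE(op_mov);
//     }
//
// NEXT_OPCODE advances m_currentIndex past the instruction and keeps parsing
// the current block, whereas LAST_OPCODE ends the block by returning
// shouldContinueParsing from parseBlock().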
1264 void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
1266 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
NodeIndex callTarget = get(currentInstruction[1].u.operand);

enum {
    ConstantFunction,
    ConstantInternalFunction,
    LinkedFunction,
    UnknownFunction
} callType;
1276 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1277 m_inlineStackTop->m_profiledBlock, m_currentIndex);
1279 #if DFG_ENABLE(DEBUG_VERBOSE)
1280 dataLogF("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
1281 if (callLinkStatus.isSet()) {
1282 if (callLinkStatus.couldTakeSlowPath())
1283 dataLogF("could take slow path, ");
1284 dataLogF("target = %p\n", callLinkStatus.callTarget());
1286 dataLogF("not set.\n");
1289 if (m_graph.isFunctionConstant(callTarget)) {
1290 callType = ConstantFunction;
1291 #if DFG_ENABLE(DEBUG_VERBOSE)
1292 dataLogF("Call at [@%lu, bc#%u] has a function constant: %p, exec %p.\n",
1293 m_graph.size(), m_currentIndex,
1294 m_graph.valueOfFunctionConstant(callTarget),
1295 m_graph.valueOfFunctionConstant(callTarget)->executable());
1297 } else if (m_graph.isInternalFunctionConstant(callTarget)) {
1298 callType = ConstantInternalFunction;
1299 #if DFG_ENABLE(DEBUG_VERBOSE)
1300 dataLogF("Call at [@%lu, bc#%u] has an internal function constant: %p.\n",
1301 m_graph.size(), m_currentIndex,
1302 m_graph.valueOfInternalFunctionConstant(callTarget));
1304 } else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
1305 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1306 callType = LinkedFunction;
1307 #if DFG_ENABLE(DEBUG_VERBOSE)
1308 dataLogF("Call at [@%lu, bc#%u] is linked to: %p, exec %p.\n",
1309 m_graph.size(), m_currentIndex, callLinkStatus.callTarget(),
1310 callLinkStatus.callTarget()->executable());
1313 callType = UnknownFunction;
1314 #if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Call at [@%lu, bc#%u] has an unknown or ambiguous target.\n",
1316 m_graph.size(), m_currentIndex);
1319 if (callType != UnknownFunction) {
1320 int argumentCountIncludingThis = currentInstruction[2].u.operand;
1321 int registerOffset = currentInstruction[3].u.operand;
1323 // Do we have a result?
1324 bool usesResult = false;
1325 int resultOperand = 0; // make compiler happy
1326 unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
1327 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
1328 SpeculatedType prediction = SpecNone;
1329 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
1330 resultOperand = putInstruction[1].u.operand;
1332 m_currentProfilingIndex = nextOffset;
1333 prediction = getPrediction();
1334 nextOffset += OPCODE_LENGTH(op_call_put_result);
1337 if (callType == ConstantInternalFunction) {
1338 if (handleConstantInternalFunction(usesResult, resultOperand, m_graph.valueOfInternalFunctionConstant(callTarget), registerOffset, argumentCountIncludingThis, prediction, kind))
1341 // Can only handle this using the generic call handler.
1342 addCall(interpreter, currentInstruction, op);
1346 JSFunction* expectedFunction;
1347 Intrinsic intrinsic;
1348 bool certainAboutExpectedFunction;
1349 if (callType == ConstantFunction) {
1350 expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
1351 intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1352 certainAboutExpectedFunction = true;
1354 ASSERT(callType == LinkedFunction);
1355 expectedFunction = callLinkStatus.callTarget();
1356 intrinsic = expectedFunction->executable()->intrinsicFor(kind);
1357 certainAboutExpectedFunction = false;
1360 if (intrinsic != NoIntrinsic) {
1361 if (!certainAboutExpectedFunction)
1362 emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);
1364 if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1365 if (!certainAboutExpectedFunction) {
1366 // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
1367 // to, since at this point we know that the call target is a constant. It's just that OSR isn't
1368 // smart enough to figure that out, since it doesn't understand CheckFunction.
1369 addToGraph(Phantom, callTarget);
1374 } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
1378 addCall(interpreter, currentInstruction, op);
1381 void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
1383 NodeIndex thisArgument;
1384 if (kind == CodeForCall)
1385 thisArgument = get(registerOffset + argumentToOperand(0));
1387 thisArgument = NoNode;
1388 addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
1391 bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
1393 // First, the really simple checks: do we have an actual JS function?
1394 if (!expectedFunction)
1396 if (expectedFunction->isHostFunction())
1399 FunctionExecutable* executable = expectedFunction->jsExecutable();
1401 // Does the number of arguments we're passing match the arity of the target? We currently
1402 // inline only if the number of arguments passed is greater than or equal to the number
// of arguments expected.
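// For example, a callee declared as function(a, b) can be inlined at a call
// site f(1, 2) or f(1, 2, 3), but not at f(1), since that call passes fewer
// arguments than the callee declares.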
1404 if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
1407 // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
1408 // If either of these are detected, then don't inline.
1410 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1412 if (depth >= Options::maximumInliningDepth())
1413 return false; // Depth exceeded.
1415 if (entry->executable() == executable)
1416 return false; // Recursion detected.
1419 // Does the code block's size match the heuristics/requirements for being
1420 // an inline candidate?
1421 CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
1425 if (!mightInlineFunctionFor(profiledBlock, kind))
1428 // If we get here then it looks like we should definitely inline this code. Proceed
1429 // with parsing the code to get bytecode, so that we can then parse the bytecode.
1430 // Note that if LLInt is enabled, the bytecode will always be available. Also note
1431 // that if LLInt is enabled, we may inline a code block that has never been JITted
1433 CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
1437 ASSERT(canInlineFunctionFor(codeBlock, kind));
1439 #if DFG_ENABLE(DEBUG_VERBOSE)
1440 dataLogF("Inlining executable %p.\n", executable);
1443 // Now we know without a doubt that we are committed to inlining. So begin the process
// by checking the callee (if necessary) and making sure that arguments and the callee
// are flushed.
1446 if (!certainAboutExpectedFunction)
1447 emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);
1449 // FIXME: Don't flush constants!
1451 int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
1453 // Make sure that the area used by the call frame is reserved.
1454 for (int arg = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
1455 m_preservedVars.set(arg);
1457 // Make sure that we have enough locals.
1458 unsigned newNumLocals = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
1459 if (newNumLocals > m_numLocals) {
1460 m_numLocals = newNumLocals;
1461 for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
1462 m_graph.m_blocks[i]->ensureLocals(newNumLocals);
1465 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1467 InlineStackEntry inlineStackEntry(
1468 this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1,
1469 (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction,
1470 (VirtualRegister)m_inlineStackTop->remapOperand(
1471 usesResult ? resultOperand : InvalidVirtualRegister),
1472 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1474 // This is where the actual inlining really happens.
1475 unsigned oldIndex = m_currentIndex;
1476 unsigned oldProfilingIndex = m_currentProfilingIndex;
1478 m_currentProfilingIndex = 0;
1480 addToGraph(InlineStart, OpInfo(argumentPositionStart));
1484 m_currentIndex = oldIndex;
1485 m_currentProfilingIndex = oldProfilingIndex;
1487 // If the inlined code created some new basic blocks, then we have linking to do.
1488 if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {
1490 ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1491 if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1492 linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
1494 ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);
1496 // It's possible that the callsite block head is not owned by the caller.
1497 if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1498 // It's definitely owned by the caller, because the caller created new blocks.
1499 // Assert that this all adds up.
1500 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
1501 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1502 inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1504 // It's definitely not owned by the caller. Tell the caller that he does not
1505 // need to link his callsite block head, because we did it for him.
1506 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1507 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1508 inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1511 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1513 ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1515 // If there was a return, but no early returns, then we're done. We allow parsing of
1516 // the caller to continue in whatever basic block we're in right now.
1517 if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1518 BasicBlock* lastBlock = m_graph.m_blocks.last().get();
1519 ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());
1521 // If we created new blocks then the last block needs linking, but in the
1522 // caller. It doesn't need to be linked to, but it needs outgoing links.
1523 if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1524 #if DFG_ENABLE(DEBUG_VERBOSE)
1525 dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
1527 // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1528 // for release builds because this block will never serve as a potential target
1529 // in the linker's binary search.
1530 lastBlock->bytecodeBegin = m_currentIndex;
1531 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
1534 m_currentBlock = m_graph.m_blocks.last().get();
1536 #if DFG_ENABLE(DEBUG_VERBOSE)
1537 dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
1542 // If we get to this point then all blocks must end in some sort of terminals.
1543 ASSERT(m_graph.last().isTerminal());
1545 // Link the early returns to the basic block we're about to create.
1546 for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1547 if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1549 BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
1550 ASSERT(!block->isLinked);
1551 Node& node = m_graph[block->last()];
1552 ASSERT(node.op() == Jump);
1553 ASSERT(node.takenBlockIndex() == NoBlock);
1554 node.setTakenBlockIndex(m_graph.m_blocks.size());
1555 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1556 #if !ASSERT_DISABLED
1557 block->isLinked = true;
1561 // Need to create a new basic block for the continuation at the caller.
1562 OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
1563 #if DFG_ENABLE(DEBUG_VERBOSE)
1564 dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
1566 m_currentBlock = block.get();
1567 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
1568 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
1569 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
1570 m_graph.m_blocks.append(block.release());
1571 prepareToParseBlock();
1573 // At this point we return and continue to generate code for the caller, but
1574 // in the new basic block.
1575 #if DFG_ENABLE(DEBUG_VERBOSE)
1576 dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
1581 void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
1585 set(resultOperand, nodeIndex);
1588 bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1590 if (argumentCountIncludingThis == 1) { // Math.min()
1591 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1595 if (argumentCountIncludingThis == 2) { // Math.min(x)
1596 // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
1597 NodeIndex result = get(registerOffset + argumentToOperand(1));
1598 addToGraph(CheckNumber, result);
1599 setIntrinsicResult(usesResult, resultOperand, result);
1603 if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1604 setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
1608 // Don't handle >=3 arguments for now.
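// Roughly, the cases above lower as follows (a sketch of the emitted nodes, not code):
//   zero arguments        -> the result operand is set to the NaN constant
//   one argument (x)      -> CheckNumber(x), and x itself becomes the result
//   two arguments (x, y)  -> a single ArithMin(x, y) or ArithMax(x, y) node
// Anything with more arguments is left to the generic call path.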
1612 // FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
1613 // they need to perform the ToNumber conversion, which can have side-effects.
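// For example (illustrative JS, not part of the parser; log() stands in for any
// observable side effect), dead-code-eliminating an unused Math.abs(o) would also
// skip the valueOf() call implied by the ToNumber conversion:
//   var o = { valueOf: function() { log("called"); return 1; } };
//   Math.abs(o); // result unused, but valueOf() must still run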
1614 bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1616 switch (intrinsic) {
1617 case AbsIntrinsic: {
1618 if (argumentCountIncludingThis == 1) { // Math.abs()
1619 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1623 if (!MacroAssembler::supportsFloatingPointAbs())
1626 NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
1627 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1628 m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
1629 setIntrinsicResult(usesResult, resultOperand, nodeIndex);
1634 return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1637 return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1639 case SqrtIntrinsic: {
1640 if (argumentCountIncludingThis == 1) { // Math.sqrt()
1641 setIntrinsicResult(usesResult, resultOperand, constantNaN());
1645 if (!MacroAssembler::supportsFloatingPointSqrt())
1648 setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
1652 case ArrayPushIntrinsic: {
1653 if (argumentCountIncludingThis != 2)
1656 ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
1657 if (!arrayMode.isJSArray())
1659 switch (arrayMode.type()) {
1660 case Array::Undecided:
1663 case Array::Contiguous:
1664 case Array::ArrayStorage: {
1665 NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1667 set(resultOperand, arrayPush);
1677 case ArrayPopIntrinsic: {
1678 if (argumentCountIncludingThis != 1)
1681 ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
1682 if (!arrayMode.isJSArray())
1684 switch (arrayMode.type()) {
1687 case Array::Contiguous:
1688 case Array::ArrayStorage: {
1689 NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
1691 set(resultOperand, arrayPop);
1700 case CharCodeAtIntrinsic: {
1701 if (argumentCountIncludingThis != 2)
1704 int thisOperand = registerOffset + argumentToOperand(0);
1705 int indexOperand = registerOffset + argumentToOperand(1);
1706 NodeIndex charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
1709 set(resultOperand, charCode);
1713 case CharAtIntrinsic: {
1714 if (argumentCountIncludingThis != 2)
1717 int thisOperand = registerOffset + argumentToOperand(0);
1718 int indexOperand = registerOffset + argumentToOperand(1);
1719 NodeIndex charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));
1722 set(resultOperand, charCode);
1726 case RegExpExecIntrinsic: {
1727 if (argumentCountIncludingThis != 2)
1730 NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1732 set(resultOperand, regExpExec);
1737 case RegExpTestIntrinsic: {
1738 if (argumentCountIncludingThis != 2)
1741 NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
1743 set(resultOperand, regExpExec);
1753 bool ByteCodeParser::handleConstantInternalFunction(
1754 bool usesResult, int resultOperand, InternalFunction* function, int registerOffset,
1755 int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
1757 // If we ever find that we have a lot of internal functions that we specialize for,
1758 // then we should probably have some sort of hashtable dispatch, or maybe even
1759 // dispatch straight through the MethodTable of the InternalFunction. But for now,
1760 // it seems that this case is hit infrequently enough, and the number of functions
1761 we know about is small enough, that having just a linear cascade of if statements is good enough.
1764 UNUSED_PARAM(prediction); // Remove this once we do more things.
1765 UNUSED_PARAM(kind); // Remove this once we do more things.
1767 if (function->classInfo() == &ArrayConstructor::s_info) {
1768 if (argumentCountIncludingThis == 2) {
1770 usesResult, resultOperand,
1771 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(registerOffset + argumentToOperand(1))));
1775 for (int i = 1; i < argumentCountIncludingThis; ++i)
1776 addVarArgChild(get(registerOffset + argumentToOperand(i)));
1778 usesResult, resultOperand,
1779 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
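// Sketch of the lowering above: a one-argument "new Array(len)" becomes
// NewArrayWithSize(ArrayWithUndecided, len); any other arity forwards each argument
// as a vararg child of a NewArray node with an ArrayWithUndecided indexing hint.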
1786 NodeIndex ByteCodeParser::handleGetByOffset(SpeculatedType prediction, NodeIndex base, unsigned identifierNumber, PropertyOffset offset)
1788 NodeIndex propertyStorage;
1789 if (isInlineOffset(offset))
1790 propertyStorage = base;
1792 propertyStorage = addToGraph(GetButterfly, base);
1793 // FIXME: It would be far more efficient for load elimination (and safer from
1794 // an OSR standpoint) if GetByOffset also referenced the object we were loading
1795 // from, and if we could load eliminate a GetByOffset even if the butterfly
1796 // had changed. That would be a great success.
1797 NodeIndex getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);
1799 StorageAccessData storageAccessData;
1800 storageAccessData.offset = indexRelativeToBase(offset);
1801 storageAccessData.identifierNumber = identifierNumber;
1802 m_graph.m_storageAccessData.append(storageAccessData);
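// Shape of the access above, in sketch form: for an inline offset the GetByOffset reads
// straight off the cell (propertyStorage == base); otherwise a GetButterfly node loads
// the out-of-line storage first. The concrete index (relative to the storage base) and
// the identifier number are recorded in m_storageAccessData rather than on the node itself.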
1807 void ByteCodeParser::handleGetByOffset(
1808 int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
1809 PropertyOffset offset)
1811 set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
1814 void ByteCodeParser::handleGetById(
1815 int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
1816 const GetByIdStatus& getByIdStatus)
1818 if (!getByIdStatus.isSimple()
1819 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
1820 set(destinationOperand,
1822 getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
1823 OpInfo(identifierNumber), OpInfo(prediction), base));
1827 ASSERT(getByIdStatus.structureSet().size());
1829 // The implementation of GetByOffset does not know to terminate speculative
1830 // execution if it doesn't have a prediction, so we do it manually.
1831 if (prediction == SpecNone)
1832 addToGraph(ForceOSRExit);
1834 NodeIndex originalBaseForBaselineJIT = base;
1836 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
1838 if (!getByIdStatus.chain().isEmpty()) {
1839 Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
1840 JSObject* currentObject = 0;
1841 for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
1842 currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
1843 currentStructure = getByIdStatus.chain()[i];
1844 base = addStructureTransitionCheck(currentObject, currentStructure);
1848 // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
1849 // ensure that the base of the original get_by_id is kept alive until we're done with
1850 // all of the speculations. We only insert the Phantom if there had been a CheckStructure
1851 // on something other than the base following the CheckStructure on base, or if the
1852 // access was compiled to a WeakJSConstant specific value, in which case we might not
1853 // have any explicit use of the base at all.
1854 if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
1855 addToGraph(Phantom, originalBaseForBaselineJIT);
1857 if (getByIdStatus.specificValue()) {
1858 ASSERT(getByIdStatus.specificValue().isCell());
1860 set(destinationOperand, cellConstant(getByIdStatus.specificValue().asCell()));
1865 destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
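// Taken together, the simple path above emits roughly this sequence:
//   [ForceOSRExit if there is no prediction]
//   CheckStructure(base)
//   [structure transition checks along the prototype chain, if any]
//   [Phantom(originalBase) to keep the base alive, when needed]
//   GetByOffset(...), or just a cell constant if a specific value is known
// The non-simple path at the top falls back to a generic GetById / GetByIdFlush node.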
1868 void ByteCodeParser::prepareToParseBlock()
1870 for (unsigned i = 0; i < m_constants.size(); ++i)
1871 m_constants[i] = ConstantRecord();
1872 m_cellConstantNodes.clear();
1875 bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, unsigned operations, unsigned putToBaseOperation, NodeIndex* base, NodeIndex* value)
1877 ResolveOperations* resolveOperations = m_codeBlock->resolveOperations(operations);
1878 if (resolveOperations->isEmpty()) {
1879 addToGraph(ForceOSRExit);
1882 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
1884 bool skippedScopes = false;
1885 bool setBase = false;
1886 ResolveOperation* pc = resolveOperations->data();
1887 NodeIndex localBase = 0;
1888 bool resolvingBase = true;
1889 while (resolvingBase) {
1890 switch (pc->m_operation) {
1891 case ResolveOperation::ReturnGlobalObjectAsBase:
1892 *base = cellConstant(globalObject);
1896 case ResolveOperation::SetBaseToGlobal:
1897 *base = cellConstant(globalObject);
1899 resolvingBase = false;
1903 case ResolveOperation::SetBaseToUndefined:
1904 *base = constantUndefined();
1906 resolvingBase = false;
1910 case ResolveOperation::SetBaseToScope:
1911 localBase = addToGraph(GetScope, OpInfo(skipCount));
1915 resolvingBase = false;
1917 // Reset the scope skipping as we've already loaded it
1918 skippedScopes = false;
1921 case ResolveOperation::ReturnScopeAsBase:
1922 *base = addToGraph(GetScope, OpInfo(skipCount));
1926 case ResolveOperation::SkipTopScopeNode:
1927 if (m_inlineStackTop->m_inlineCallFrame)
1930 skippedScopes = true;
1934 case ResolveOperation::SkipScopes:
1935 if (m_inlineStackTop->m_inlineCallFrame)
1937 skipCount += pc->m_scopesToSkip;
1938 skippedScopes = true;
1942 case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
1945 case ResolveOperation::Fail:
1949 resolvingBase = false;
1953 localBase = addToGraph(GetScope, OpInfo(skipCount));
1955 if (base && !setBase)
1959 ResolveOperation* resolveValueOperation = pc;
1960 switch (resolveValueOperation->m_operation) {
1961 case ResolveOperation::GetAndReturnGlobalProperty: {
1962 ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
1963 if (status.isSimple()) {
1964 ASSERT(status.structure());
1966 NodeIndex globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());
1968 if (status.specificValue()) {
1969 ASSERT(status.specificValue().isCell());
1970 *value = cellConstant(status.specificValue().asCell());
1972 *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
1976 NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
1977 m_graph.m_resolveGlobalData.append(ResolveGlobalData());
1978 ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
1979 data.identifierNumber = identifier;
1980 data.resolveOperationsIndex = operations;
1981 data.putToBaseOperationIndex = putToBaseOperation;
1982 data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
1986 case ResolveOperation::GetAndReturnGlobalVar: {
1987 *value = addToGraph(GetGlobalVar,
1988 OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
1989 OpInfo(prediction));
1992 case ResolveOperation::GetAndReturnGlobalVarWatchable: {
1993 SpeculatedType prediction = getPrediction();
1995 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
1997 Identifier ident = m_codeBlock->identifier(identifier);
1998 SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
1999 if (!entry.couldBeWatched()) {
2000 *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
2004 // The watchpoint is still intact! This means that we will get notified if the
2005 // current value in the global variable changes. So, we can inline that value.
2006 // Moreover, currently we can assume that this value is a JSFunction*, which
2007 // implies that it's a cell. This simplifies things, since in general we'd have
2008 // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
2009 // of having both cases we just assert that the value is a cell.
2011 // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
2012 // register pointer. But CSE tracks effects on global variables by comparing
2013 // register pointers. Because CSE executes multiple times while the backend
2014 // executes once, we use the following performance trade-off:
2015 // - The node refers directly to the register pointer to make CSE super cheap.
2016 // - To perform backend code generation, the node only contains the identifier
2017 // number, from which it is possible to get (via a few average-time O(1)
2018 // lookups) to the WatchpointSet.
2020 addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));
2022 JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
2023 ASSERT(specificValue.isCell());
2024 *value = cellConstant(specificValue.asCell());
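// Net effect of the watched case above, in sketch form: a GlobalVarWatchpoint node
// guards the assumption and the current value is inlined as a cell constant, so no
// load is emitted at all; the unwatched case loads via GetGlobalVar instead.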
2027 case ResolveOperation::GetAndReturnScopedVar: {
2028 NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
2029 *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
2039 bool ByteCodeParser::parseBlock(unsigned limit)
2041 bool shouldContinueParsing = true;
2043 Interpreter* interpreter = m_globalData->interpreter;
2044 Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2045 unsigned blockBegin = m_currentIndex;
2047 // If we are the first basic block, introduce markers for arguments. This allows
2048 // us to track if a use of an argument may use the actual argument passed, as
2049 // opposed to using a value we set explicitly.
2050 if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
2051 m_graph.m_arguments.resize(m_numArguments);
2052 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2053 VariableAccessData* variable = newVariableAccessData(
2054 argumentToOperand(argument), m_codeBlock->isCaptured(argumentToOperand(argument)));
2055 variable->mergeStructureCheckHoistingFailed(
2056 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2057 NodeIndex setArgument = addToGraph(SetArgument, OpInfo(variable));
2058 m_graph.m_arguments[argument] = setArgument;
2059 m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
2060 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
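// In sketch form: for a function of N arguments the root block is seeded with one
// SetArgument node per argument, recorded in m_graph.m_arguments and in
// variablesAtHead/variablesAtTail, so later uses can tell an incoming argument value
// apart from a value explicitly stored over it.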
2065 m_currentProfilingIndex = m_currentIndex;
2067 // Don't extend over jump destinations.
2068 if (m_currentIndex == limit) {
2069 // Ordinarily we want to plant a jump. But refuse to do this if the block is
2070 // empty. This is a special case for inlining, which might otherwise create
2071 // some empty blocks in some cases. When parseBlock() returns with an empty
2072 // block, it will get repurposed instead of creating a new one. Note that this
2073 // logic relies on every bytecode resulting in one or more nodes, which would
2074 be true anyway except for op_loop_hint, which emits a Phantom to force this to be true.
2076 if (!m_currentBlock->isEmpty())
2077 addToGraph(Jump, OpInfo(m_currentIndex));
2079 #if DFG_ENABLE(DEBUG_VERBOSE)
2080 dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
2083 return shouldContinueParsing;
2086 // Switch on the current bytecode opcode.
2087 Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2088 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2089 OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2092 // === Function entry opcodes ===
2095 // Initialize all locals to undefined.
2096 for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2097 set(i, constantUndefined(), SetOnEntry);
2098 NEXT_OPCODE(op_enter);
2100 case op_convert_this: {
2101 NodeIndex op1 = getThis();
2102 if (m_graph[op1].op() != ConvertThis) {
2103 ValueProfile* profile =
2104 m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
2105 profile->computeUpdatedPrediction();
2106 #if DFG_ENABLE(DEBUG_VERBOSE)
2107 dataLogF("[@%lu bc#%u]: profile %p: ", m_graph.size(), m_currentProfilingIndex, profile);
2108 profile->dump(WTF::dataFile());
2111 if (profile->m_singletonValueIsTop
2112 || !profile->m_singletonValue
2113 || !profile->m_singletonValue.isCell()
2114 || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
2115 setThis(addToGraph(ConvertThis, op1));
2119 OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
2123 NEXT_OPCODE(op_convert_this);
2126 case op_create_this: {
2127 int calleeOperand = currentInstruction[2].u.operand;
2128 NodeIndex callee = get(calleeOperand);
2129 bool alreadyEmitted = false;
2130 if (m_graph[callee].op() == WeakJSConstant) {
2131 JSCell* cell = m_graph[callee].weakConstant();
2132 ASSERT(cell->inherits(&JSFunction::s_info));
2134 JSFunction* function = jsCast<JSFunction*>(cell);
2135 Structure* inheritorID = function->tryGetKnownInheritorID();
2137 addToGraph(InheritorIDWatchpoint, OpInfo(function));
2138 set(currentInstruction[1].u.operand, addToGraph(NewObject, OpInfo(inheritorID)));
2139 alreadyEmitted = true;
2142 if (!alreadyEmitted)
2143 set(currentInstruction[1].u.operand, addToGraph(CreateThis, callee));
2144 NEXT_OPCODE(op_create_this);
2147 case op_new_object: {
2148 set(currentInstruction[1].u.operand, addToGraph(NewObject, OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->emptyObjectStructure())));
2149 NEXT_OPCODE(op_new_object);
2152 case op_new_array: {
2153 int startOperand = currentInstruction[2].u.operand;
2154 int numOperands = currentInstruction[3].u.operand;
2155 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2156 for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
2157 addVarArgChild(get(operandIdx));
2158 set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2159 NEXT_OPCODE(op_new_array);
2162 case op_new_array_with_size: {
2163 int lengthOperand = currentInstruction[2].u.operand;
2164 ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2165 set(currentInstruction[1].u.operand, addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(lengthOperand)));
2166 NEXT_OPCODE(op_new_array_with_size);
2169 case op_new_array_buffer: {
2170 int startConstant = currentInstruction[2].u.operand;
2171 int numConstants = currentInstruction[3].u.operand;
2172 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2173 NewArrayBufferData data;
2174 data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2175 data.numConstants = numConstants;
2176 data.indexingType = profile->selectIndexingType();
2178 // If this statement has never executed, we'll have the wrong indexing type in the profile.
2179 for (int i = 0; i < numConstants; ++i) {
2181 leastUpperBoundOfIndexingTypeAndValue(
2183 m_codeBlock->constantBuffer(data.startConstant)[i]);
2186 m_graph.m_newArrayBufferData.append(data);
2187 set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2188 NEXT_OPCODE(op_new_array_buffer);
2191 case op_new_regexp: {
2192 set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2193 NEXT_OPCODE(op_new_regexp);
2196 case op_get_callee: {
2197 ValueProfile* profile = currentInstruction[2].u.profile;
2198 profile->computeUpdatedPrediction();
2199 if (profile->m_singletonValueIsTop
2200 || !profile->m_singletonValue
2201 || !profile->m_singletonValue.isCell())
2202 set(currentInstruction[1].u.operand, get(JSStack::Callee));
2204 ASSERT(profile->m_singletonValue.asCell()->inherits(&JSFunction::s_info));
2205 NodeIndex actualCallee = get(JSStack::Callee);
2206 addToGraph(CheckFunction, OpInfo(profile->m_singletonValue.asCell()), actualCallee);
2207 set(currentInstruction[1].u.operand, addToGraph(WeakJSConstant, OpInfo(profile->m_singletonValue.asCell())));
2209 NEXT_OPCODE(op_get_callee);
2212 // === Bitwise operations ===
2215 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2216 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2217 set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
2218 NEXT_OPCODE(op_bitand);
2222 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2223 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2224 set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
2225 NEXT_OPCODE(op_bitor);
2229 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2230 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2231 set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
2232 NEXT_OPCODE(op_bitxor);
2236 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2237 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2239 // Optimize out shifts by zero.
2240 if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
2243 result = addToGraph(BitRShift, op1, op2);
2244 set(currentInstruction[1].u.operand, result);
2245 NEXT_OPCODE(op_rshift);
2249 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2250 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2252 // Optimize out shifts by zero.
2253 if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
2256 result = addToGraph(BitLShift, op1, op2);
2257 set(currentInstruction[1].u.operand, result);
2258 NEXT_OPCODE(op_lshift);
2262 NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
2263 NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
2265 // The result of a zero-extending right shift is treated as an unsigned value.
2266 // This means that if the top bit is set, the result is not in the int32 range,
2267 // and as such must be stored as a double. If the shift amount is a constant,
2268 // we may be able to optimize.
2269 if (isInt32Constant(op2)) {
2270 // If we know we are shifting by a non-zero amount, then since the operation
2271 // zero fills we know the top bit of the result must be zero, and as such the
2272 // result must be within the int32 range. Conversely, if this is a shift by
2273 // zero, then the result may be changed by the conversion to unsigned, but it
2274 // is not necessary to perform the shift!
2275 if (valueOfInt32Constant(op2) & 0x1f)
2276 result = addToGraph(BitURShift, op1, op2);
2278 result = makeSafe(addToGraph(UInt32ToNumber, op1));
2280 // Cannot optimize at this stage; shift & potentially rebox as a double.
2281 result = addToGraph(BitURShift, op1, op2);
2282 result = makeSafe(addToGraph(UInt32ToNumber, result));
2284 set(currentInstruction[1].u.operand, result);
2285 NEXT_OPCODE(op_urshift);
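// Illustrative examples of the constant cases above: for "x >>> 5" the top bit of the
// result is known to be zero, so a plain BitURShift suffices; for "x >>> 0" no shift is
// emitted but the value still goes through UInt32ToNumber, since e.g. (-1 >>> 0) is
// 4294967295 and no longer fits in int32.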
2288 // === Increment/Decrement opcodes ===
2291 unsigned srcDst = currentInstruction[1].u.operand;
2292 NodeIndex op = get(srcDst);
2293 set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
2294 NEXT_OPCODE(op_pre_inc);
2298 unsigned result = currentInstruction[1].u.operand;
2299 unsigned srcDst = currentInstruction[2].u.operand;
2300 ASSERT(result != srcDst); // Required for assumptions we make during OSR.
2301 NodeIndex op = get(srcDst);
2303 set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
2304 NEXT_OPCODE(op_post_inc);
2308 unsigned srcDst = currentInstruction[1].u.operand;
2309 NodeIndex op = get(srcDst);
2310 set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
2311 NEXT_OPCODE(op_pre_dec);
2315 unsigned result = currentInstruction[1].u.operand;
2316 unsigned srcDst = currentInstruction[2].u.operand;
2317 NodeIndex op = get(srcDst);
2319 set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
2320 NEXT_OPCODE(op_post_dec);
2323 // === Arithmetic operations ===
2326 NodeIndex op1 = get(currentInstruction[2].u.operand);
2327 NodeIndex op2 = get(currentInstruction[3].u.operand);
2328 if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
2329 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
2331 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
2332 NEXT_OPCODE(op_add);
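// Illustrative example of the distinction above: 1 + 2 can use ArithAdd because both
// operands are known to produce numbers, whereas "a" + 1 must use ValueAdd, since + may
// mean string concatenation when either operand is not a number.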
2336 NodeIndex op1 = get(currentInstruction[2].u.operand);
2337 NodeIndex op2 = get(currentInstruction[3].u.operand);
2338 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
2339 NEXT_OPCODE(op_sub);
2343 NodeIndex op1 = get(currentInstruction[2].u.operand);
2344 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
2345 NEXT_OPCODE(op_negate);
2349 // Multiply requires that the inputs are not truncated, unfortunately.
2350 NodeIndex op1 = get(currentInstruction[2].u.operand);
2351 NodeIndex op2 = get(currentInstruction[3].u.operand);
2352 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
2353 NEXT_OPCODE(op_mul);
2357 NodeIndex op1 = get(currentInstruction[2].u.operand);
2358 NodeIndex op2 = get(currentInstruction[3].u.operand);
2359 set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
2360 NEXT_OPCODE(op_mod);
2364 NodeIndex op1 = get(currentInstruction[2].u.operand);
2365 NodeIndex op2 = get(currentInstruction[3].u.operand);
2366 set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2367 NEXT_OPCODE(op_div);
2370 // === Misc operations ===
2372 #if ENABLE(DEBUG_WITH_BREAKPOINT)
2374 addToGraph(Breakpoint);
2375 NEXT_OPCODE(op_debug);
2378 NodeIndex op = get(currentInstruction[2].u.operand);
2379 set(currentInstruction[1].u.operand, op);
2380 NEXT_OPCODE(op_mov);
2383 case op_check_has_instance:
2384 addToGraph(CheckHasInstance, get(currentInstruction[3].u.operand));
2385 NEXT_OPCODE(op_check_has_instance);
2387 case op_instanceof: {
2388 NodeIndex value = get(currentInstruction[2].u.operand);
2389 NodeIndex prototype = get(currentInstruction[3].u.operand);
2390 set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, prototype));
2391 NEXT_OPCODE(op_instanceof);
2394 case op_is_undefined: {
2395 NodeIndex value = get(currentInstruction[2].u.operand);
2396 set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
2397 NEXT_OPCODE(op_is_undefined);
2400 case op_is_boolean: {
2401 NodeIndex value = get(currentInstruction[2].u.operand);
2402 set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
2403 NEXT_OPCODE(op_is_boolean);
2406 case op_is_number: {
2407 NodeIndex value = get(currentInstruction[2].u.operand);
2408 set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
2409 NEXT_OPCODE(op_is_number);
2412 case op_is_string: {
2413 NodeIndex value = get(currentInstruction[2].u.operand);
2414 set(currentInstruction[1].u.operand, addToGraph(IsString, value));
2415 NEXT_OPCODE(op_is_string);
2418 case op_is_object: {
2419 NodeIndex value = get(currentInstruction[2].u.operand);
2420 set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
2421 NEXT_OPCODE(op_is_object);
2424 case op_is_function: {
2425 NodeIndex value = get(currentInstruction[2].u.operand);
2426 set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
2427 NEXT_OPCODE(op_is_function);
2431 NodeIndex value = get(currentInstruction[2].u.operand);
2432 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
2433 NEXT_OPCODE(op_not);
2436 case op_to_primitive: {
2437 NodeIndex value = get(currentInstruction[2].u.operand);
2438 set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
2439 NEXT_OPCODE(op_to_primitive);
2443 int startOperand = currentInstruction[2].u.operand;
2444 int numOperands = currentInstruction[3].u.operand;
2445 for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
2446 addVarArgChild(get(operandIdx));
2447 set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
2448 NEXT_OPCODE(op_strcat);
2452 NodeIndex op1 = get(currentInstruction[2].u.operand);
2453 NodeIndex op2 = get(currentInstruction[3].u.operand);
2454 set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
2455 NEXT_OPCODE(op_less);
2459 NodeIndex op1 = get(currentInstruction[2].u.operand);
2460 NodeIndex op2 = get(currentInstruction[3].u.operand);
2461 set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
2462 NEXT_OPCODE(op_lesseq);
2466 NodeIndex op1 = get(currentInstruction[2].u.operand);
2467 NodeIndex op2 = get(currentInstruction[3].u.operand);
2468 set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
2469 NEXT_OPCODE(op_greater);
2472 case op_greatereq: {
2473 NodeIndex op1 = get(currentInstruction[2].u.operand);
2474 NodeIndex op2 = get(currentInstruction[3].u.operand);
2475 set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
2476 NEXT_OPCODE(op_greatereq);
2480 NodeIndex op1 = get(currentInstruction[2].u.operand);
2481 NodeIndex op2 = get(currentInstruction[3].u.operand);
2482 set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
2487 NodeIndex value = get(currentInstruction[2].u.operand);
2488 set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
2489 NEXT_OPCODE(op_eq_null);
2493 NodeIndex op1 = get(currentInstruction[2].u.operand);
2494 NodeIndex op2 = get(currentInstruction[3].u.operand);
2495 set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
2496 NEXT_OPCODE(op_stricteq);
2500 NodeIndex op1 = get(currentInstruction[2].u.operand);
2501 NodeIndex op2 = get(currentInstruction[3].u.operand);
2502 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2503 NEXT_OPCODE(op_neq);
2507 NodeIndex value = get(currentInstruction[2].u.operand);
2508 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
2509 NEXT_OPCODE(op_neq_null);
2512 case op_nstricteq: {
2513 NodeIndex op1 = get(currentInstruction[2].u.operand);
2514 NodeIndex op2 = get(currentInstruction[3].u.operand);
2515 set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
2516 NEXT_OPCODE(op_nstricteq);
2519 // === Property access operations ===
2521 case op_get_by_val: {
2522 SpeculatedType prediction = getPrediction();
2524 NodeIndex base = get(currentInstruction[2].u.operand);
2525 ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base);
2526 NodeIndex property = get(currentInstruction[3].u.operand);
2527 NodeIndex getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2528 set(currentInstruction[1].u.operand, getByVal);
2530 NEXT_OPCODE(op_get_by_val);
2533 case op_put_by_val: {
2534 NodeIndex base = get(currentInstruction[1].u.operand);
2536 ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base);
2538 NodeIndex property = get(currentInstruction[2].u.operand);
2539 NodeIndex value = get(currentInstruction[3].u.operand);
2541 addVarArgChild(base);
2542 addVarArgChild(property);
2543 addVarArgChild(value);
2544 addVarArgChild(NoNode); // Leave room for property storage.
2545 addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2547 NEXT_OPCODE(op_put_by_val);
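// Vararg child layout for the PutByVal above, in sketch form: child 0 is the base,
// child 1 the property, child 2 the value, and child 3 a NoNode placeholder reserved
// so that later phases can attach the property storage if the array mode calls for it.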
2551 case op_get_by_id_out_of_line:
2552 case op_get_array_length: {
2553 SpeculatedType prediction = getPrediction();
2555 NodeIndex base = get(currentInstruction[2].u.operand);
2556 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2558 Identifier identifier = m_codeBlock->identifier(identifierNumber);
2559 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2560 m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
2563 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2565 NEXT_OPCODE(op_get_by_id);
2568 case op_put_by_id_out_of_line:
2569 case op_put_by_id_transition_direct:
2570 case op_put_by_id_transition_normal:
2571 case op_put_by_id_transition_direct_out_of_line:
2572 case op_put_by_id_transition_normal_out_of_line: {
2573 NodeIndex value = get(currentInstruction[3].u.operand);
2574 NodeIndex base = get(currentInstruction[1].u.operand);
2575 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2576 bool direct = currentInstruction[8].u.operand;
2578 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2579 m_inlineStackTop->m_profiledBlock,
2581 m_codeBlock->identifier(identifierNumber));
2582 if (!putByIdStatus.isSet())
2583 addToGraph(ForceOSRExit);
2585 bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
2587 if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
2588 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2589 NodeIndex propertyStorage;
2590 if (isInlineOffset(putByIdStatus.offset()))
2591 propertyStorage = base;
2593 propertyStorage = addToGraph(GetButterfly, base);
2594 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
2596 StorageAccessData storageAccessData;
2597 storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
2598 storageAccessData.identifierNumber = identifierNumber;
2599 m_graph.m_storageAccessData.append(storageAccessData);
2600 } else if (!hasExitSite
2601 && putByIdStatus.isSimpleTransition()
2602 && structureChainIsStillValid(
2604 putByIdStatus.oldStructure(),
2605 putByIdStatus.structureChain())) {
2607 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
2609 if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
2610 addStructureTransitionCheck(
2611 putByIdStatus.oldStructure()->storedPrototype().asCell());
2614 for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
2615 JSValue prototype = (*it)->storedPrototype();
2616 if (prototype.isNull())
2618 ASSERT(prototype.isCell());
2619 addStructureTransitionCheck(prototype.asCell());
2622 ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
2624 NodeIndex propertyStorage;
2625 StructureTransitionData* transitionData =
2626 m_graph.addStructureTransitionData(
2627 StructureTransitionData(
2628 putByIdStatus.oldStructure(),
2629 putByIdStatus.newStructure()));
2631 if (putByIdStatus.oldStructure()->outOfLineCapacity()
2632 != putByIdStatus.newStructure()->outOfLineCapacity()) {
2634 // If we're growing the property storage then it must be because we're
2635 // storing into the out-of-line storage.
2636 ASSERT(!isInlineOffset(putByIdStatus.offset()));
2638 if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
2639 propertyStorage = addToGraph(
2640 AllocatePropertyStorage, OpInfo(transitionData), base);
2642 propertyStorage = addToGraph(
2643 ReallocatePropertyStorage, OpInfo(transitionData),
2644 base, addToGraph(GetButterfly, base));
2647 if (isInlineOffset(putByIdStatus.offset()))
2648 propertyStorage = base;
2650 propertyStorage = addToGraph(GetButterfly, base);
2653 addToGraph(PutStructure, OpInfo(transitionData), base);
2657 OpInfo(m_graph.m_storageAccessData.size()),
2662 StorageAccessData storageAccessData;
2663 storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
2664 storageAccessData.identifierNumber = identifierNumber;
2665 m_graph.m_storageAccessData.append(storageAccessData);
2668 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2670 addToGraph(PutById, OpInfo(identifierNumber), base, value);
2673 NEXT_OPCODE(op_put_by_id);
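// Summary of the three put_by_id paths above, in sketch form:
//   simple replace:    CheckStructure, then PutByOffset into the existing storage
//   simple transition: CheckStructure, prototype-chain transition checks, storage
//                      (re)allocation if the out-of-line capacity grows, PutStructure,
//                      then PutByOffset
//   everything else:   a generic PutById / PutByIdDirect node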
2676 case op_init_global_const_nop: {
2677 NEXT_OPCODE(op_init_global_const_nop);
2680 case op_init_global_const: {
2681 NodeIndex value = get(currentInstruction[2].u.operand);
2684 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2686 NEXT_OPCODE(op_init_global_const);
2689 case op_init_global_const_check: {
2690 NodeIndex value = get(currentInstruction[2].u.operand);
2691 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
2692 JSGlobalObject* globalObject = codeBlock->globalObject();
2693 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
2694 Identifier identifier = m_codeBlock->identifier(identifierNumber);
2695 SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
2696 if (!entry.couldBeWatched()) {
2699 OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2701 NEXT_OPCODE(op_init_global_const_check);
2705 OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2706 OpInfo(identifierNumber),
2708 NEXT_OPCODE(op_init_global_const_check);
2712 // === Block terminators. ===
2715 unsigned relativeOffset = currentInstruction[1].u.operand;
2716 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2717 LAST_OPCODE(op_jmp);
2721 unsigned relativeOffset = currentInstruction[1].u.operand;
2722 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2723 LAST_OPCODE(op_loop);
2727 unsigned relativeOffset = currentInstruction[2].u.operand;
2728 NodeIndex condition = get(currentInstruction[1].u.operand);
2729 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
2730 LAST_OPCODE(op_jtrue);
2734 unsigned relativeOffset = currentInstruction[2].u.operand;
2735 NodeIndex condition = get(currentInstruction[1].u.operand);
2736 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
2737 LAST_OPCODE(op_jfalse);
2740 case op_loop_if_true: {
2741 unsigned relativeOffset = currentInstruction[2].u.operand;
2742 NodeIndex condition = get(currentInstruction[1].u.operand);
2743 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
2744 LAST_OPCODE(op_loop_if_true);
2747 case op_loop_if_false: {
2748 unsigned relativeOffset = currentInstruction[2].u.operand;
2749 NodeIndex condition = get(currentInstruction[1].u.operand);
2750 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
2751 LAST_OPCODE(op_loop_if_false);
2755 unsigned relativeOffset = currentInstruction[2].u.operand;
2756 NodeIndex value = get(currentInstruction[1].u.operand);
2757 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2758 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
2759 LAST_OPCODE(op_jeq_null);
2762 case op_jneq_null: {
2763 unsigned relativeOffset = currentInstruction[2].u.operand;
2764 NodeIndex value = get(currentInstruction[1].u.operand);
2765 NodeIndex condition = addToGraph(CompareEq, value, constantNull());
2766 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
2767 LAST_OPCODE(op_jneq_null);
2771 unsigned relativeOffset = currentInstruction[3].u.operand;
2772 NodeIndex op1 = get(currentInstruction[1].u.operand);
2773 NodeIndex op2 = get(currentInstruction[2].u.operand);
2774 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2775 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2776 LAST_OPCODE(op_jless);
2780 unsigned relativeOffset = currentInstruction[3].u.operand;
2781 NodeIndex op1 = get(currentInstruction[1].u.operand);
2782 NodeIndex op2 = get(currentInstruction[2].u.operand);
2783 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2784 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2785 LAST_OPCODE(op_jlesseq);
2789 unsigned relativeOffset = currentInstruction[3].u.operand;
2790 NodeIndex op1 = get(currentInstruction[1].u.operand);
2791 NodeIndex op2 = get(currentInstruction[2].u.operand);
2792 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2793 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2794 LAST_OPCODE(op_jgreater);
2797 case op_jgreatereq: {
2798 unsigned relativeOffset = currentInstruction[3].u.operand;
2799 NodeIndex op1 = get(currentInstruction[1].u.operand);
2800 NodeIndex op2 = get(currentInstruction[2].u.operand);
2801 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2802 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2803 LAST_OPCODE(op_jgreatereq);
2807 unsigned relativeOffset = currentInstruction[3].u.operand;
2808 NodeIndex op1 = get(currentInstruction[1].u.operand);
2809 NodeIndex op2 = get(currentInstruction[2].u.operand);
2810 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2811 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2812 LAST_OPCODE(op_jnless);
2816 unsigned relativeOffset = currentInstruction[3].u.operand;
2817 NodeIndex op1 = get(currentInstruction[1].u.operand);
2818 NodeIndex op2 = get(currentInstruction[2].u.operand);
2819 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2820 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2821 LAST_OPCODE(op_jnlesseq);
2824 case op_jngreater: {
2825 unsigned relativeOffset = currentInstruction[3].u.operand;
2826 NodeIndex op1 = get(currentInstruction[1].u.operand);
2827 NodeIndex op2 = get(currentInstruction[2].u.operand);
2828 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2829 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2830 LAST_OPCODE(op_jngreater);
2833 case op_jngreatereq: {
2834 unsigned relativeOffset = currentInstruction[3].u.operand;
2835 NodeIndex op1 = get(currentInstruction[1].u.operand);
2836 NodeIndex op2 = get(currentInstruction[2].u.operand);
2837 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2838 addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2839 LAST_OPCODE(op_jngreatereq);
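// Note on the branch family above: the positive forms (op_jless and friends) put the
// jump target in the taken slot, while the negated forms (op_jnless and friends) reuse
// the same comparison node and simply swap the taken/not-taken targets, so no separate
// "not less" comparison is ever needed.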
2842 case op_loop_if_less: {
2843 unsigned relativeOffset = currentInstruction[3].u.operand;
2844 NodeIndex op1 = get(currentInstruction[1].u.operand);
2845 NodeIndex op2 = get(currentInstruction[2].u.operand);
2846 NodeIndex condition = addToGraph(CompareLess, op1, op2);
2847 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
2848 LAST_OPCODE(op_loop_if_less);
2851 case op_loop_if_lesseq: {
2852 unsigned relativeOffset = currentInstruction[3].u.operand;
2853 NodeIndex op1 = get(currentInstruction[1].u.operand);
2854 NodeIndex op2 = get(currentInstruction[2].u.operand);
2855 NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
2856 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
2857 LAST_OPCODE(op_loop_if_lesseq);
2860 case op_loop_if_greater: {
2861 unsigned relativeOffset = currentInstruction[3].u.operand;
2862 NodeIndex op1 = get(currentInstruction[1].u.operand);
2863 NodeIndex op2 = get(currentInstruction[2].u.operand);
2864 NodeIndex condition = addToGraph(CompareGreater, op1, op2);
2865 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
2866 LAST_OPCODE(op_loop_if_greater);
2869 case op_loop_if_greatereq: {
2870 unsigned relativeOffset = currentInstruction[3].u.operand;
2871 NodeIndex op1 = get(currentInstruction[1].u.operand);
2872 NodeIndex op2 = get(currentInstruction[2].u.operand);
2873 NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
2874 addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
2875 LAST_OPCODE(op_loop_if_greatereq);
2879 flushArgumentsAndCapturedVariables();
2880 if (m_inlineStackTop->m_inlineCallFrame) {
2881 if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
2882 setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2883 m_inlineStackTop->m_didReturn = true;
2884 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2885 // If we're returning from the first block, then we're done parsing.
2886 ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
2887 shouldContinueParsing = false;
2888 LAST_OPCODE(op_ret);
2890 // If inlining created blocks, and we're doing a return, then we need some fixup.
2892 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
2893 m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2895 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2896 ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2897 addToGraph(Jump, OpInfo(NoBlock));
2898 m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2899 m_inlineStackTop->m_didEarlyReturn = true;
2901 LAST_OPCODE(op_ret);
2903 addToGraph(Return, get(currentInstruction[1].u.operand));
2904 LAST_OPCODE(op_ret);
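// Return handling in sketch form: inside an inlined frame the return value is written to
// the caller-provided register and, unless this op_ret is the last instruction of the
// inlined code block, a Jump with no target yet is planted for early-return linking (see
// the inlining epilogue earlier in this file); only a non-inlined frame emits a real
// Return node.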
2907 flushArgumentsAndCapturedVariables();
2908 ASSERT(!m_inlineStackTop->m_inlineCallFrame);
2909 addToGraph(Return, get(currentInstruction[1].u.operand));
2910 LAST_OPCODE(op_end);
2913 flushArgumentsAndCapturedVariables();
2914 addToGraph(Throw, get(currentInstruction[1].u.operand));
2915 LAST_OPCODE(op_throw);
2917 case op_throw_static_error:
2918 flushArgumentsAndCapturedVariables();
2919 addToGraph(ThrowReferenceError);
2920 LAST_OPCODE(op_throw_static_error);
2923 handleCall(interpreter, currentInstruction, Call, CodeForCall);
2924 NEXT_OPCODE(op_call);
2927 handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
2928 NEXT_OPCODE(op_construct);
2930 case op_call_varargs: {
2931 ASSERT(m_inlineStackTop->m_inlineCallFrame);
2932 ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
2933 ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
2934 // It would be cool to funnel this into handleCall() so that it can handle
2935 // inlining. But currently that won't be profitable anyway, since none of the
2936 // uses of call_varargs will be inlineable. So we set this up manually and
2937 // without inline/intrinsic detection.
2939 Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);
2941 SpeculatedType prediction = SpecNone;
2942 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
2943 m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
2944 prediction = getPrediction();
2947 addToGraph(CheckArgumentsNotCreated);
2949 unsigned argCount = m_inlineStackTop->m_inlineCallFrame->arguments.size();
2950 if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
2951 m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
2953 addVarArgChild(get(currentInstruction[1].u.operand)); // callee
2954 addVarArgChild(get(currentInstruction[2].u.operand)); // this
2955 for (unsigned argument = 1; argument < argCount; ++argument)
2956 addVarArgChild(get(argumentToOperand(argument)));
2958 NodeIndex call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
2959 if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
2960 set(putInstruction[1].u.operand, call);
2962 NEXT_OPCODE(op_call_varargs);
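// Sketch of the lowering above: because the inlined frame's argument count is known,
// op_call_varargs becomes an ordinary vararg Call node whose children are the callee,
// the this value, and each forwarded argument; the result is only stored if the next
// instruction is an op_call_put_result that consumes it.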
2965 case op_call_put_result:
2966 NEXT_OPCODE(op_call_put_result);
2969 // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2970 // support simmer for a while before making it more general, since it's
2971 // already gnarly enough as it is.
2972 ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
2975 OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
2976 get(currentInstruction[1].u.operand));
2977 addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
2978 LAST_OPCODE(op_jneq_ptr);
2981 case op_resolve_global_property:
2982 case op_resolve_global_var:
2983 case op_resolve_scoped_var:
2984 case op_resolve_scoped_var_on_top_scope:
2985 case op_resolve_scoped_var_with_top_scope_check: {
2986 SpeculatedType prediction = getPrediction();
2988 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2989 unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[3].u.operand];
2990 NodeIndex value = 0;
2991 if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
2992 set(currentInstruction[1].u.operand, value);
2993 NEXT_OPCODE(op_resolve);
2996 NodeIndex resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
2997 m_graph.m_resolveOperationsData.append(ResolveOperationData());
2998 ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
2999 data.identifierNumber = identifier;
3000 data.resolveOperationsIndex = operations;
3002 set(currentInstruction[1].u.operand, resolve);
3004 NEXT_OPCODE(op_resolve);
3007 case op_put_to_base_variable:
3008 case op_put_to_base: {
3009 unsigned base = currentInstruction[1].u.operand;
3010 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3011 unsigned value = currentInstruction[3].u.operand;
3012 unsigned operation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[4].u.operand];
3013 PutToBaseOperation* putToBase = m_codeBlock->putToBaseOperation(operation);
3015 if (putToBase->m_isDynamic) {
3016 addToGraph(Phantom, get(base));
3017 addToGraph(PutById, OpInfo(identifier), get(base), get(value));
3018 NEXT_OPCODE(op_put_to_base);
3021 switch (putToBase->m_kind) {
3022 case PutToBaseOperation::Uninitialised:
3023 addToGraph(Phantom, get(base));
3024 addToGraph(ForceOSRExit);
3027 case PutToBaseOperation::GlobalVariablePutChecked: {
3028 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
3029 JSGlobalObject* globalObject = codeBlock->globalObject();
3030 SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
3031 if (entry.couldBeWatched()) {
3032 addToGraph(PutGlobalVarCheck,
3033 OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
3039 case PutToBaseOperation::GlobalVariablePut:
3040 addToGraph(PutGlobalVar,
3041 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
3044 case PutToBaseOperation::VariablePut: {
3045 addToGraph(Phantom, get(base));
3046 NodeIndex getScope = addToGraph(GetScope, OpInfo(putToBase->m_scopeDepth));
3047 NodeIndex getScopeRegisters = addToGraph(GetScopeRegisters, getScope);
3048 addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), getScope, getScopeRegisters, get(value));
3051 case PutToBaseOperation::GlobalPropertyPut: {
3052 if (!putToBase->m_structure) {
3053 addToGraph(Phantom, get(base));
3054 addToGraph(ForceOSRExit);
3055 NEXT_OPCODE(op_put_to_base);
3057 NodeIndex baseNode = get(base);
3058 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
3059 NodeIndex propertyStorage;
3060 if (isInlineOffset(putToBase->m_offset))
3061 propertyStorage = baseNode;
3063 propertyStorage = addToGraph(GetButterfly, baseNode);
3064 addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));
3066 StorageAccessData storageAccessData;
3067 storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
3068 storageAccessData.identifierNumber = identifier;
3069 m_graph.m_storageAccessData.append(storageAccessData);
3072 case PutToBaseOperation::Readonly:
3073 case PutToBaseOperation::Generic:
3074 addToGraph(Phantom, get(base));
3075 addToGraph(PutById, OpInfo(identifier), get(base), get(value));
3077 NEXT_OPCODE(op_put_to_base);
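// The put_to_base kinds above map roughly as follows:
//   Uninitialised                  -> Phantom(base) + ForceOSRExit
//   GlobalVariablePut(Checked)     -> PutGlobalVar (or PutGlobalVarCheck when the
//                                     symbol table entry is still watchable)
//   VariablePut                    -> GetScope + GetScopeRegisters + PutScopedVar
//   GlobalPropertyPut              -> CheckStructure + PutByOffset, or ForceOSRExit when
//                                     no structure has been recorded
//   Readonly / Generic / dynamic   -> a generic PutById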
3080 case op_resolve_base_to_global:
3081 case op_resolve_base_to_global_dynamic:
3082 case op_resolve_base_to_scope:
3083 case op_resolve_base_to_scope_with_top_scope_check:
3084 case op_resolve_base: {
3085 SpeculatedType prediction = getPrediction();
3087 unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3088 unsigned operations = m_inlineStackTop->m_resolveOperationRemap[currentInstruction[4].u.operand];
3089 unsigned putToBaseOperation = m_inlineStackTop->m_putToBaseOperationRemap[currentInstruction[5].u.operand];