2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGByteCodeParser.h"
31 #include "ArrayConstructor.h"
32 #include "CallLinkStatus.h"
33 #include "CodeBlock.h"
34 #include "CodeBlockWithJITType.h"
35 #include "DFGArrayMode.h"
36 #include "DFGCapabilities.h"
37 #include "DFGJITCode.h"
38 #include "GetByIdStatus.h"
40 #include "JSActivation.h"
41 #include "JSCInlines.h"
42 #include "PreciseJumpTargets.h"
43 #include "PutByIdStatus.h"
44 #include "StackAlignment.h"
45 #include "StringConstructor.h"
46 #include <wtf/CommaPrinter.h>
47 #include <wtf/HashMap.h>
48 #include <wtf/MathExtras.h>
49 #include <wtf/StdLibExtras.h>
51 namespace JSC { namespace DFG {
53 static const bool verbose = false;
55 class ConstantBufferKey {
63 ConstantBufferKey(WTF::HashTableDeletedValueType)
69 ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
70 : m_codeBlock(codeBlock)
75 bool operator==(const ConstantBufferKey& other) const
77 return m_codeBlock == other.m_codeBlock
78 && m_index == other.m_index;
83 return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
86 bool isHashTableDeletedValue() const
88 return !m_codeBlock && m_index;
91 CodeBlock* codeBlock() const { return m_codeBlock; }
92 unsigned index() const { return m_index; }
95 CodeBlock* m_codeBlock;
99 struct ConstantBufferKeyHash {
100 static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
101 static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
106 static const bool safeToCompareToEmptyOrDeleted = true;
109 } } // namespace JSC::DFG
113 template<typename T> struct DefaultHash;
114 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
115 typedef JSC::DFG::ConstantBufferKeyHash Hash;
118 template<typename T> struct HashTraits;
119 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
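// Editorial note (inferred from this excerpt, not original commentary): with the DefaultHash
// and HashTraits specializations above, ConstantBufferKey can be used directly as a HashMap
// key. The parser below appears to rely on this for m_constantBufferCache, which maps a
// (CodeBlock*, constant buffer index) pair from an inlinee to the index of the corresponding
// buffer in the code block being compiled, so the same buffer is not registered twice.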
123 namespace JSC { namespace DFG {
125 // === ByteCodeParser ===
127 // This class is used to compile the dataflow graph from a CodeBlock.
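// A minimal driving sketch (editorial, hedged; the entry point's declaration is elided in
// this excerpt): the DFG plan is expected to do roughly
//     ByteCodeParser parser(dfgGraph);    // dfgGraph is a hypothetical DFG::Graph
//     parser.parse();                     // the "parse a full CodeBlock" step noted below
// after which the Graph holds one BasicBlock per reachable bytecode range, ready for the
// later DFG phases.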
128 class ByteCodeParser {
130 ByteCodeParser(Graph& graph)
132 , m_codeBlock(graph.m_codeBlock)
133 , m_profiledBlock(graph.m_profiledBlock)
137 , m_constantUndefined(graph.freeze(jsUndefined()))
138 , m_constantNull(graph.freeze(jsNull()))
139 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
140 , m_constantOne(graph.freeze(jsNumber(1)))
141 , m_numArguments(m_codeBlock->numParameters())
142 , m_numLocals(m_codeBlock->m_numCalleeRegisters)
143 , m_parameterSlots(0)
144 , m_numPassedVarArgs(0)
145 , m_inlineStackTop(0)
146 , m_haveBuiltOperandMaps(false)
147 , m_currentInstruction(0)
149 ASSERT(m_profiledBlock);
152 // Parse a full CodeBlock of bytecode.
156 struct InlineStackEntry;
158 // Just parse from m_currentIndex to the end of the current CodeBlock.
159 void parseCodeBlock();
161 void ensureLocals(unsigned newNumLocals)
163 if (newNumLocals <= m_numLocals)
165 m_numLocals = newNumLocals;
166 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
167 m_graph.block(i)->ensureLocals(newNumLocals);
170 // Helper for min and max.
171 bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
173 // Handle calls. This resolves issues surrounding inlining and intrinsics.
175 int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
176 Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
177 SpeculatedType prediction);
179 int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
180 Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
181 void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
182 void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
183 void emitFunctionChecks(CallVariant, Node* callTarget, int registerOffset, CodeSpecializationKind);
184 void undoFunctionChecks(CallVariant);
185 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
186 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
187 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
188 bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
189 enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
190 bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance);
191 void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability);
192 void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
193 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
194 bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
195 bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
196 bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
197 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
198 Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
200 int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
201 const GetByIdStatus&);
203 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
205 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
207 void emitChecks(const ConstantStructureCheckVector&);
209 Node* getScope(unsigned skipCount);
211 void prepareToParseBlock();
214 // Parse a single basic block of bytecode instructions.
215 bool parseBlock(unsigned limit);
216 // Link block successors.
217 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
218 void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
220 VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
222 ASSERT(!operand.isConstant());
224 m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
225 return &m_graph.m_variableAccessData.last();
228 // Get/Set the operands/result of a bytecode instruction.
229 Node* getDirect(VirtualRegister operand)
231 ASSERT(!operand.isConstant());
233 // Is this an argument?
234 if (operand.isArgument())
235 return getArgument(operand);
238 return getLocal(operand);
241 Node* get(VirtualRegister operand)
243 if (operand.isConstant()) {
244 unsigned constantIndex = operand.toConstantIndex();
245 unsigned oldSize = m_constants.size();
246 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
247 JSValue value = m_inlineStackTop->m_codeBlock->getConstant(operand.offset());
248 if (constantIndex >= oldSize) {
249 m_constants.grow(constantIndex + 1);
250 for (unsigned i = oldSize; i < m_constants.size(); ++i)
251 m_constants[i] = nullptr;
253 m_constants[constantIndex] =
254 addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
256 ASSERT(m_constants[constantIndex]);
257 return m_constants[constantIndex];
260 if (inlineCallFrame()) {
261 if (!inlineCallFrame()->isClosureCall) {
262 JSFunction* callee = inlineCallFrame()->calleeConstant();
263 if (operand.offset() == JSStack::Callee)
264 return weakJSConstant(callee);
265 if (operand.offset() == JSStack::ScopeChain)
266 return weakJSConstant(callee->scope());
268 } else if (operand.offset() == JSStack::Callee)
269 return addToGraph(GetCallee);
270 else if (operand.offset() == JSStack::ScopeChain)
271 return addToGraph(GetMyScope);
273 return getDirect(m_inlineStackTop->remapOperand(operand));
277 // A normal set which follows a two-phase commit that spans code origins. During
278 // the current code origin it issues a MovHint, and at the start of the next
279 // code origin there will be a SetLocal. If the local needs flushing, the second
280 // SetLocal will be preceded with a Flush.
283 // A set where the SetLocal happens immediately and there is still a Flush. This
284 // is relevant when assigning to a local in tricky situations for the delayed
285 // SetLocal logic but where we know that we have not performed any side effects
286 // within this code origin. This is a safe replacement for NormalSet anytime we
287 // know that we have not yet performed side effects in this code origin.
288 ImmediateSetWithFlush,
290 // A set where the SetLocal happens immediately and we do not Flush it even if
291 // this is a local that is marked as needing it. This is relevant when
292 // initializing locals at the top of a function.
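// Illustrative sketch of the three modes (editorial; simplified, not literal code from this
// excerpt): an ordinary arithmetic bytecode is parsed as
//     set(dst, makeSafe(addToGraph(ValueAdd, get(op1), get(op2))));   // NormalSet
// which records the MovHint now and queues a DelayedSetLocal; the SetLocal (preceded by a
// Flush when needed) is emitted when processSetLocalQueue() runs for the next bytecode.
// By contrast, initializing the callee register of an inlined closure call uses
//     set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet);
// planting the SetLocal right away and skipping the Flush, which is appropriate when a
// local is merely being initialized.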
295 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
297 addToGraph(MovHint, OpInfo(operand.offset()), value);
299 DelayedSetLocal delayed = DelayedSetLocal(operand, value);
301 if (setMode == NormalSet) {
302 m_setLocalQueue.append(delayed);
306 return delayed.execute(this, setMode);
309 void processSetLocalQueue()
311 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
312 m_setLocalQueue[i].execute(this);
313 m_setLocalQueue.resize(0);
316 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
318 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
321 Node* injectLazyOperandSpeculation(Node* node)
323 ASSERT(node->op() == GetLocal);
324 ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
325 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
326 LazyOperandValueProfileKey key(m_currentIndex, node->local());
327 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
328 node->variableAccessData()->predict(prediction);
332 // Used in implementing get/set, above, where the operand is a local variable.
333 Node* getLocal(VirtualRegister operand)
335 unsigned local = operand.toLocal();
337 if (local < m_localWatchpoints.size()) {
338 if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
339 if (JSValue value = set->inferredValue()) {
340 addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
341 addToGraph(VariableWatchpoint, OpInfo(set));
342 return weakJSConstant(value);
347 Node* node = m_currentBlock->variablesAtTail.local(local);
348 bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
350 // This has two goals: 1) link together variable access datas, and 2)
351 // try to avoid creating redundant GetLocals. (1) is required for
352 // correctness - no other phase will ensure that block-local variable
353 // access data unification is done correctly. (2) is purely opportunistic
354 // and is meant as a compile-time optimization only.
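// Concretely (editorial example): if a block evaluates `x + x`, the second get() of x finds
// the first access in variablesAtTail and either reuses that GetLocal node outright or, if
// the tail node is a SetLocal, forwards its stored value (child1) instead of emitting a
// second load.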
356 VariableAccessData* variable;
359 variable = node->variableAccessData();
360 variable->mergeIsCaptured(isCaptured);
363 switch (node->op()) {
367 return node->child1().node();
373 variable = newVariableAccessData(operand, isCaptured);
375 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
376 m_currentBlock->variablesAtTail.local(local) = node;
380 Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
382 unsigned local = operand.toLocal();
383 bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
385 if (setMode != ImmediateNakedSet) {
386 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
387 if (isCaptured || argumentPosition)
388 flushDirect(operand, argumentPosition);
391 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
392 variableAccessData->mergeStructureCheckHoistingFailed(
393 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
394 variableAccessData->mergeCheckArrayHoistingFailed(
395 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
396 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
397 m_currentBlock->variablesAtTail.local(local) = node;
401 // Used in implementing get/set, above, where the operand is an argument.
402 Node* getArgument(VirtualRegister operand)
404 unsigned argument = operand.toArgument();
405 ASSERT(argument < m_numArguments);
407 Node* node = m_currentBlock->variablesAtTail.argument(argument);
408 bool isCaptured = m_codeBlock->isCaptured(operand);
410 VariableAccessData* variable;
413 variable = node->variableAccessData();
414 variable->mergeIsCaptured(isCaptured);
416 switch (node->op()) {
420 return node->child1().node();
425 variable = newVariableAccessData(operand, isCaptured);
427 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
428 m_currentBlock->variablesAtTail.argument(argument) = node;
431 Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
433 unsigned argument = operand.toArgument();
434 ASSERT(argument < m_numArguments);
436 bool isCaptured = m_codeBlock->isCaptured(operand);
438 VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
440 // Always flush arguments, except for 'this'. If 'this' is created by us,
441 // then make sure that it's never unboxed.
443 if (setMode != ImmediateNakedSet)
444 flushDirect(operand);
445 } else if (m_codeBlock->specializationKind() == CodeForConstruct)
446 variableAccessData->mergeShouldNeverUnbox(true);
448 variableAccessData->mergeStructureCheckHoistingFailed(
449 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
450 variableAccessData->mergeCheckArrayHoistingFailed(
451 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
452 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
453 m_currentBlock->variablesAtTail.argument(argument) = node;
457 ArgumentPosition* findArgumentPositionForArgument(int argument)
459 InlineStackEntry* stack = m_inlineStackTop;
460 while (stack->m_inlineCallFrame)
461 stack = stack->m_caller;
462 return stack->m_argumentPositions[argument];
465 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
467 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
468 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
469 if (!inlineCallFrame)
471 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
473 if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
475 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
477 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
478 return stack->m_argumentPositions[argument];
483 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
485 if (operand.isArgument())
486 return findArgumentPositionForArgument(operand.toArgument());
487 return findArgumentPositionForLocal(operand);
490 void flush(VirtualRegister operand)
492 flushDirect(m_inlineStackTop->remapOperand(operand));
495 void flushDirect(VirtualRegister operand)
497 flushDirect(operand, findArgumentPosition(operand));
500 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
502 bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
504 ASSERT(!operand.isConstant());
506 Node* node = m_currentBlock->variablesAtTail.operand(operand);
508 VariableAccessData* variable;
511 variable = node->variableAccessData();
512 variable->mergeIsCaptured(isCaptured);
514 variable = newVariableAccessData(operand, isCaptured);
516 node = addToGraph(Flush, OpInfo(variable));
517 m_currentBlock->variablesAtTail.operand(operand) = node;
518 if (argumentPosition)
519 argumentPosition->addVariable(variable);
522 void flush(InlineStackEntry* inlineStackEntry)
525 if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
526 numArguments = inlineCallFrame->arguments.size();
527 if (inlineCallFrame->isClosureCall) {
528 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
529 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
532 numArguments = inlineStackEntry->m_codeBlock->numParameters();
533 for (unsigned argument = numArguments; argument-- > 1;)
534 flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
535 for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
536 if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
538 flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
542 void flushForTerminal()
544 for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
545 flush(inlineStackEntry);
548 void flushForReturn()
550 flush(m_inlineStackTop);
553 void flushIfTerminal(SwitchData& data)
555 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
558 for (unsigned i = data.cases.size(); i--;) {
559 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
566 // Assumes that the constant should be strongly marked.
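// Editorial note: jsConstant() freezes its value strongly, so the compilation itself keeps
// the value alive; weakJSConstant() below freezes it only weakly, which presumes the value
// is kept alive by something the compiled code already depends on (for example, a callee
// cell we have planted a check against).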
567 Node* jsConstant(JSValue constantValue)
569 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
572 Node* weakJSConstant(JSValue constantValue)
574 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
577 // Helper functions to get/set the this value.
580 return get(m_inlineStackTop->m_codeBlock->thisRegister());
583 void setThis(Node* value)
585 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
588 InlineCallFrame* inlineCallFrame()
590 return m_inlineStackTop->m_inlineCallFrame;
593 CodeOrigin currentCodeOrigin()
595 return CodeOrigin(m_currentIndex, inlineCallFrame());
598 BranchData* branchData(unsigned taken, unsigned notTaken)
600 // We assume that branches originating from bytecode always have a fall-through. We
601 // use this assumption to avoid checking for the creation of terminal blocks.
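// For example, an op_jtrue whose taken target jumps backwards (a loop latch) still has its
// notTaken successor at m_currentIndex + OPCODE_LENGTH(op_jtrue), which is strictly greater
// than m_currentIndex, so at least one of the two indices always points forward.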
602 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
603 BranchData* data = m_graph.m_branchData.add();
604 *data = BranchData::withBytecodeIndices(taken, notTaken);
608 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
610 Node* result = m_graph.addNode(
611 SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
614 m_currentBlock->append(result);
617 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
619 Node* result = m_graph.addNode(
620 SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
622 m_currentBlock->append(result);
625 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
627 Node* result = m_graph.addNode(
628 SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
631 m_currentBlock->append(result);
634 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
636 Node* result = m_graph.addNode(
637 SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
638 Edge(child1), Edge(child2), Edge(child3));
640 m_currentBlock->append(result);
644 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
646 Node* result = m_graph.addNode(
647 SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
648 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
650 m_currentBlock->append(result);
652 m_numPassedVarArgs = 0;
657 void removeLastNodeFromGraph(NodeType expectedNodeType)
659 Node* node = m_currentBlock->takeLast();
660 RELEASE_ASSERT(node->op() == expectedNodeType);
661 m_graph.m_allocator.free(node);
664 void addVarArgChild(Node* child)
666 m_graph.m_varArgChildren.append(Edge(child));
667 m_numPassedVarArgs++;
670 Node* addCallWithoutSettingResult(
671 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
672 SpeculatedType prediction)
674 addVarArgChild(callee);
675 size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
676 if (parameterSlots > m_parameterSlots)
677 m_parameterSlots = parameterSlots;
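// In other words, each call site requests outgoing stack space for the callee's frame
// header plus its arguments (minus the CallerFrameAndPC slots the callee writes itself),
// and m_parameterSlots keeps the maximum over all call sites so the frame is sized once
// for the deepest requirement.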
679 int dummyThisArgument = op == Call || op == NativeCall || op == ProfiledCall ? 0 : 1;
680 for (int i = 0 + dummyThisArgument; i < argCount; ++i)
681 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
683 return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
687 int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
688 SpeculatedType prediction)
690 Node* call = addCallWithoutSettingResult(
691 op, opInfo, callee, argCount, registerOffset, prediction);
692 VirtualRegister resultReg(result);
693 if (resultReg.isValid())
694 set(VirtualRegister(result), call);
698 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
700 Node* objectNode = weakJSConstant(object);
701 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
705 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
707 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
708 return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
711 SpeculatedType getPrediction(unsigned bytecodeIndex)
713 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
715 if (prediction == SpecNone) {
716 // We have no information about what values this node generates. Give up
717 // on executing this code, since we're likely to do more damage than good.
718 addToGraph(ForceOSRExit);
724 SpeculatedType getPredictionWithoutOSRExit()
726 return getPredictionWithoutOSRExit(m_currentIndex);
729 SpeculatedType getPrediction()
731 return getPrediction(m_currentIndex);
734 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
736 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
737 profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
738 return ArrayMode::fromObserved(locker, profile, action, false);
741 ArrayMode getArrayMode(ArrayProfile* profile)
743 return getArrayMode(profile, Array::Read);
746 ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
748 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
750 profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
753 m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
754 || profile->outOfBounds(locker);
756 ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
761 Node* makeSafe(Node* node)
763 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
764 node->mergeFlags(NodeMayOverflowInDFG);
765 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
766 node->mergeFlags(NodeMayNegZeroInDFG);
768 if (!isX86() && node->op() == ArithMod)
771 if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
774 switch (node->op()) {
779 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw a double.
780 node->mergeFlags(NodeMayOverflowInBaseline);
784 // Currently we can't tell the difference between a negation overflowing
785 // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
786 // path then we assume that it did both of those things.
787 node->mergeFlags(NodeMayOverflowInBaseline);
788 node->mergeFlags(NodeMayNegZeroInBaseline);
792 // FIXME: We should detect cases where we only overflowed but never created negative zero.
794 // https://bugs.webkit.org/show_bug.cgi?id=132470
795 if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
796 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
797 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
798 else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
799 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
800 node->mergeFlags(NodeMayNegZeroInBaseline);
804 RELEASE_ASSERT_NOT_REACHED();
811 Node* makeDivSafe(Node* node)
813 ASSERT(node->op() == ArithDiv);
815 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
816 node->mergeFlags(NodeMayOverflowInDFG);
817 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
818 node->mergeFlags(NodeMayNegZeroInDFG);
820 // The main slow case counter for op_div in the old JIT counts only when
821 // the operands are not numbers. We don't care about that since we already
822 // have speculations in place that take care of that separately. We only
823 // care about when the outcome of the division is not an integer, which
824 // is what the special fast case counter tells us.
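// Illustration (editorial): a division such as 3 / 2 keeps integer operands but produces
// 1.5, so the baseline JIT bumps the special fast case counter for this op_div even though
// the ordinary slow case counter may never fire; that counter is the signal acted on below.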
826 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
829 // FIXME: It might be possible to make this more granular.
830 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
835 void buildOperandMapsIfNecessary();
838 CodeBlock* m_codeBlock;
839 CodeBlock* m_profiledBlock;
842 // The current block being generated.
843 BasicBlock* m_currentBlock;
844 // The bytecode index of the current instruction being generated.
845 unsigned m_currentIndex;
847 FrozenValue* m_constantUndefined;
848 FrozenValue* m_constantNull;
849 FrozenValue* m_constantNaN;
850 FrozenValue* m_constantOne;
851 Vector<Node*, 16> m_constants;
853 // The number of arguments passed to the function.
854 unsigned m_numArguments;
855 // The number of locals (vars + temporaries) used in the function.
856 unsigned m_numLocals;
857 // The number of slots (in units of sizeof(Register)) that we need to
858 // preallocate for arguments to outgoing calls from this frame. This
859 // number includes the CallFrame slots that we initialize for the callee
860 // (but not the callee-initialized CallerFrame and ReturnPC slots).
861 // This number is 0 if and only if this function is a leaf.
862 unsigned m_parameterSlots;
863 // The number of var args passed to the next var arg node.
864 unsigned m_numPassedVarArgs;
866 HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
868 Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
870 struct InlineStackEntry {
871 ByteCodeParser* m_byteCodeParser;
873 CodeBlock* m_codeBlock;
874 CodeBlock* m_profiledBlock;
875 InlineCallFrame* m_inlineCallFrame;
877 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
879 QueryableExitProfile m_exitProfile;
881 // Remapping of identifier and constant numbers from the code block being
882 // inlined (inline callee) to the code block that we're inlining into
883 // (the machine code block, which is the transitive, though not necessarily direct, caller).
885 Vector<unsigned> m_identifierRemap;
886 Vector<unsigned> m_constantBufferRemap;
887 Vector<unsigned> m_switchRemap;
889 // Blocks introduced by this code block, which need successor linking.
890 // May include up to one basic block that includes the continuation after
891 // the callsite in the caller. These must be appended in the order that they
892 // are created, but their bytecodeBegin values need not be in order as they
894 Vector<UnlinkedBlock> m_unlinkedBlocks;
896 // Potential block linking targets. Must be sorted by bytecodeBegin, and
897 // cannot have two blocks that have the same bytecodeBegin.
898 Vector<BasicBlock*> m_blockLinkingTargets;
900 // If the callsite's basic block was split into two, then this will be
901 // the head of the callsite block. It needs its successors linked to the
902 // m_unlinkedBlocks, but not the other way around: there's no way for
903 // any blocks in m_unlinkedBlocks to jump back into this block.
904 BasicBlock* m_callsiteBlockHead;
906 // Does the callsite block head need linking? This is typically true
907 // but will be false for the machine code block's inline stack entry
908 // (since that one is not inlined) and for cases where an inline callee
909 // did the linking for us.
910 bool m_callsiteBlockHeadNeedsLinking;
912 VirtualRegister m_returnValue;
914 // Speculations about variable types collected from the profiled code block,
915 // which are based on OSR exit profiles that past DFG compilations of this
916 // code block had gathered.
917 LazyOperandValueProfileParser m_lazyOperands;
919 CallLinkInfoMap m_callLinkInfos;
920 StubInfoMap m_stubInfos;
922 // Did we see any returns? We need to handle the (uncommon but necessary)
923 // case where a procedure that does not return was inlined.
926 // Did we have any early returns?
927 bool m_didEarlyReturn;
929 // Pointers to the argument position trackers for this slice of code.
930 Vector<ArgumentPosition*> m_argumentPositions;
932 InlineStackEntry* m_caller;
937 CodeBlock* profiledBlock,
938 BasicBlock* callsiteBlockHead,
939 JSFunction* callee, // Null if this is a closure call.
940 VirtualRegister returnValueVR,
941 VirtualRegister inlineCallFrameStart,
942 int argumentCountIncludingThis,
943 InlineCallFrame::Kind);
947 m_byteCodeParser->m_inlineStackTop = m_caller;
950 VirtualRegister remapOperand(VirtualRegister operand) const
952 if (!m_inlineCallFrame)
955 ASSERT(!operand.isConstant());
957 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
961 InlineStackEntry* m_inlineStackTop;
963 struct DelayedSetLocal {
964 VirtualRegister m_operand;
967 DelayedSetLocal() { }
968 DelayedSetLocal(VirtualRegister operand, Node* value)
974 Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
976 if (m_operand.isArgument())
977 return parser->setArgument(m_operand, m_value, setMode);
978 return parser->setLocal(m_operand, m_value, setMode);
982 Vector<DelayedSetLocal, 2> m_setLocalQueue;
984 // Have we built operand maps? We initialize them lazily, and only when doing inlining.
986 bool m_haveBuiltOperandMaps;
987 // Mapping between identifier names and numbers.
988 BorrowedIdentifierMap m_identifierMap;
990 CodeBlock* m_dfgCodeBlock;
991 CallLinkStatus::ContextMap m_callContextMap;
992 StubInfoMap m_dfgStubInfos;
994 Instruction* m_currentInstruction;
997 #define NEXT_OPCODE(name) \
998 m_currentIndex += OPCODE_LENGTH(name); \
1001 #define LAST_OPCODE(name) \
1002 m_currentIndex += OPCODE_LENGTH(name); \
1003 return shouldContinueParsing
1005 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1007 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1009 pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1010 pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
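// Editorial note on the operand layout used above: operand 1 is the result register,
// operand 2 the callee, operand 3 argumentCountIncludingThis, and operand 4 the frame
// offset, which is stored as a positive value in bytecode and negated here to form
// registerOffset.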
1013 void ByteCodeParser::handleCall(
1014 int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1015 int callee, int argumentCountIncludingThis, int registerOffset)
1017 Node* callTarget = get(VirtualRegister(callee));
1019 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1020 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1021 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1024 result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1025 argumentCountIncludingThis, registerOffset, callLinkStatus);
1028 void ByteCodeParser::handleCall(
1029 int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1030 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1031 CallLinkStatus callLinkStatus)
1034 result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1035 registerOffset, callLinkStatus, getPrediction());
1038 void ByteCodeParser::handleCall(
1039 int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1040 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1041 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1043 ASSERT(registerOffset <= 0);
1045 if (callTarget->hasConstant())
1046 callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
1048 if ((!callLinkStatus.canOptimize() || callLinkStatus.size() != 1)
1049 && !isFTL(m_graph.m_plan.mode) && Options::useFTLJIT()
1050 && InlineCallFrame::isNormalCall(kind)
1051 && CallEdgeLog::isEnabled()
1052 && Options::dfgDoesCallEdgeProfiling()) {
1053 ASSERT(op == Call || op == Construct);
1057 op = ProfiledConstruct;
1060 if (!callLinkStatus.canOptimize()) {
1061 // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1062 // that we cannot optimize them.
1064 addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1068 unsigned nextOffset = m_currentIndex + instructionSize;
1072 if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1073 if (m_graph.compilation())
1074 m_graph.compilation()->noticeInlinedCall();
1078 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1079 if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1080 CallVariant callee = callLinkStatus[0].callee();
1081 JSFunction* function = callee.function();
1082 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1083 if (function && function->isHostFunction()) {
1084 emitFunctionChecks(callee, callTarget, registerOffset, specializationKind);
1085 callOpInfo = OpInfo(m_graph.freeze(function));
1087 if (op == Call || op == ProfiledCall)
1090 ASSERT(op == Construct || op == ProfiledConstruct);
1091 op = NativeConstruct;
1097 addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1100 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
1103 if (kind == CodeForCall)
1104 thisArgument = get(virtualRegisterForArgument(0, registerOffset));
1109 Node* callTargetForCheck;
1110 if (callee.isClosureCall()) {
1111 calleeCell = callee.executable();
1112 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1114 calleeCell = callee.nonExecutableCallee();
1115 callTargetForCheck = callTarget;
1119 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1122 void ByteCodeParser::undoFunctionChecks(CallVariant callee)
1124 removeLastNodeFromGraph(CheckCell);
1125 if (callee.isClosureCall())
1126 removeLastNodeFromGraph(GetExecutable);
1129 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
1131 for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
1132 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1135 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1138 dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1140 FunctionExecutable* executable = callee.functionExecutable();
1143 dataLog(" Failing because there is no function executable.");
1147 // Does the number of arguments we're passing match the arity of the target? We currently
1148 // inline only if the number of arguments passed is greater than or equal to the number of
1149 // arguments expected.
1150 if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1152 dataLog(" Failing because of arity mismatch.\n");
1156 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1157 // being an inline candidate? We might not have a code block if code was thrown away or if we
1158 // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1159 // if we had a static proof of what was being called; this might happen for example if you call a
1160 // global function, where watchpointing gives us static information. Overall, it's a rare case
1161 // because we expect that any hot callees would have already been compiled.
1162 CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1165 dataLog(" Failing because no code block available.\n");
1168 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1169 codeBlock, kind, callee.isClosureCall());
1170 if (!canInline(capabilityLevel)) {
1172 dataLog(" Failing because the function is not inlineable.\n");
1176 // Check if the caller is already too large. We do this check here because that's just
1177 // where we happen to also have the callee's code block, and we want that for the
1178 // purpose of unsetting SABI.
1179 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1180 codeBlock->m_shouldAlwaysBeInlined = false;
1182 dataLog(" Failing because the caller is too large.\n");
1186 // FIXME: this should be better at predicting how much bloat we will introduce by inlining this function.
1188 // https://bugs.webkit.org/show_bug.cgi?id=127627
1190 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1191 // too many levels? If either of these are detected, then don't inline. We adjust our
1192 // heuristics if we are dealing with a function that cannot otherwise be compiled.
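// For example (editorial): if A inlines B and B then calls A again, the walk below sees A's
// executable a second time and bumps 'recursion'; once that count reaches
// Options::maximumInliningRecursion(), or the walk itself reaches
// Options::maximumInliningDepth() entries, we refuse to inline and plant a real call.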
1195 unsigned recursion = 0;
1197 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1199 if (depth >= Options::maximumInliningDepth()) {
1201 dataLog(" Failing because depth exceeded.\n");
1205 if (entry->executable() == executable) {
1207 if (recursion >= Options::maximumInliningRecursion()) {
1209 dataLog(" Failing because recursion detected.\n");
1216 dataLog(" Inlining should be possible.\n");
1218 // It might be possible to inline.
1219 return codeBlock->instructionCount();
1222 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability)
1224 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1226 ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1228 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1230 // FIXME: Don't flush constants!
1232 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1235 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1236 JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1238 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1240 VirtualRegister resultReg(resultOperand);
1241 if (resultReg.isValid())
1242 resultReg = m_inlineStackTop->remapOperand(resultReg);
1244 InlineStackEntry inlineStackEntry(
1245 this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1246 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1248 // This is where the actual inlining really happens.
1249 unsigned oldIndex = m_currentIndex;
1252 InlineVariableData inlineVariableData;
1253 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1254 inlineVariableData.argumentPositionStart = argumentPositionStart;
1255 inlineVariableData.calleeVariable = 0;
1258 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1259 == callee.isClosureCall());
1260 if (callee.isClosureCall()) {
1261 VariableAccessData* calleeVariable =
1262 set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1263 VariableAccessData* scopeVariable =
1264 set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData();
1266 calleeVariable->mergeShouldNeverUnbox(true);
1267 scopeVariable->mergeShouldNeverUnbox(true);
1269 inlineVariableData.calleeVariable = calleeVariable;
1272 m_graph.m_inlineVariableData.append(inlineVariableData);
1275 clearCaches(); // Reset our state now that we're back to the outer code.
1277 m_currentIndex = oldIndex;
1279 // If the inlined code created some new basic blocks, then we have linking to do.
1280 if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1282 ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1283 if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1284 linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1286 ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1288 if (callerLinkability == CallerDoesNormalLinking)
1289 cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1291 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1293 ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1295 BasicBlock* lastBlock = m_graph.lastBlock();
1296 // If there was a return, but no early returns, then we're done. We allow parsing of
1297 // the caller to continue in whatever basic block we're in right now.
1298 if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1299 ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
1301 // If we created new blocks then the last block needs linking, but in the
1302 // caller. It doesn't need to be linked to, but it needs outgoing links.
1303 if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1304 // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1305 // for release builds because this block will never serve as a potential target
1306 // in the linker's binary search.
1307 lastBlock->bytecodeBegin = m_currentIndex;
1308 if (callerLinkability == CallerDoesNormalLinking) {
1310 dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1311 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1315 m_currentBlock = m_graph.lastBlock();
1319 // If we get to this point then all blocks must end in some sort of terminal.
1320 ASSERT(lastBlock->last()->isTerminal());
1322 // Need to create a new basic block for the continuation at the caller.
1323 RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1325 // Link the early returns to the basic block we're about to create.
1326 for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1327 if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1329 BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1330 ASSERT(!blockToLink->isLinked);
1331 Node* node = blockToLink->last();
1332 ASSERT(node->op() == Jump);
1333 ASSERT(!node->targetBlock());
1334 node->targetBlock() = block.get();
1335 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1337 dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1338 blockToLink->didLink();
1341 m_currentBlock = block.get();
1342 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1344 dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1345 if (callerLinkability == CallerDoesNormalLinking) {
1346 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1347 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1349 m_graph.appendBlock(block);
1350 prepareToParseBlock();
1353 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1355 // It's possible that the callsite block head is not owned by the caller.
1356 if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1357 // It's definitely owned by the caller, because the caller created new blocks.
1358 // Assert that this all adds up.
1359 ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1360 ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1361 inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1363 // It's definitely not owned by the caller. Tell the caller that he does not
1364 // need to link his callsite block head, because we did it for him.
1365 ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1366 ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1367 inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1371 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance)
1373 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1375 if (!inliningBalance)
1378 if (InternalFunction* function = callee.internalFunction()) {
1379 if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind)) {
1380 addToGraph(Phantom, callTargetNode);
1381 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1388 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1389 if (intrinsic != NoIntrinsic) {
1390 if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1391 addToGraph(Phantom, callTargetNode);
1392 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1399 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1400 if (myInliningCost > inliningBalance)
1403 inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability);
1404 inliningBalance -= myInliningCost;
1408 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1411 dataLog("Handling inlining...\n");
1412 dataLog("Stack: ", currentCodeOrigin(), "\n");
1414 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1416 if (!callLinkStatus.size()) {
1418 dataLog("Bailing inlining.\n");
1422 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1423 if (specializationKind == CodeForConstruct)
1424 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1425 if (callLinkStatus.isClosureCall())
1426 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1428 // First check if we can avoid creating control flow. Our inliner does some CFG
1429 // simplification on the fly and this helps reduce compile times, but we can only leverage
1430 // this in cases where we don't need control flow diamonds to check the callee.
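// Roughly (editorial summary of the code below): a monomorphic call site that cannot take
// the slow path is handled in the current block: emitFunctionChecks() plants the cell (or
// executable) check and attemptToInlineCall() splices the callee in. Anything polymorphic
// instead builds a Switch over the callee (or its executable), with one block per inlined
// variant, an optional slow-path call block, and a shared continuation block that every
// landing block jumps to.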
1431 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1433 callLinkStatus[0].callee(), callTargetNode, registerOffset, specializationKind);
1434 bool result = attemptToInlineCall(
1435 callTargetNode, resultOperand, callLinkStatus[0].callee(), registerOffset,
1436 argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1438 if (!result && !callLinkStatus.isProved())
1439 undoFunctionChecks(callLinkStatus[0].callee());
1441 dataLog("Done inlining (simple).\n");
1442 dataLog("Stack: ", currentCodeOrigin(), "\n");
1447 // We need to create some kind of switch over callee. For now we only do this if we believe that
1448 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1449 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1450 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1451 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1452 // also. Currently we opt against this, but it could be interesting. That would require having a
1453 // separate node for call edge profiling.
1454 // FIXME: Introduce the notion of a separate call edge profiling node.
1455 // https://bugs.webkit.org/show_bug.cgi?id=136033
1456 if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()) {
1458 dataLog("Bailing inlining (hard).\n");
1459 dataLog("Stack: ", currentCodeOrigin(), "\n");
1464 unsigned oldOffset = m_currentIndex;
1466 bool allAreClosureCalls = true;
1467 bool allAreDirectCalls = true;
1468 for (unsigned i = callLinkStatus.size(); i--;) {
1469 if (callLinkStatus[i].callee().isClosureCall())
1470 allAreDirectCalls = false;
1472 allAreClosureCalls = false;
1475 Node* thingToSwitchOn;
1476 if (allAreDirectCalls)
1477 thingToSwitchOn = callTargetNode;
1478 else if (allAreClosureCalls)
1479 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1481 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1482 // where it would be beneficial. Also, CallLinkStatus would make all callees appear like
1483 // closure calls if any calls were closure calls - except for calls to internal functions.
1484 // So this will only arise if some callees are internal functions and others are closures.
1485 // https://bugs.webkit.org/show_bug.cgi?id=136020
1487 dataLog("Bailing inlining (mix).\n");
1488 dataLog("Stack: ", currentCodeOrigin(), "\n");
1494 dataLog("Doing hard inlining...\n");
1495 dataLog("Stack: ", currentCodeOrigin(), "\n");
1498 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1499 // store the callee so that it will be accessible to all of the blocks we're about to create. We
1500 // get away with doing an immediate-set here because we wouldn't have performed any side effects yet.
1503 dataLog("Register offset: ", registerOffset);
1504 VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1505 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1507 dataLog("Callee is going to be ", calleeReg, "\n");
1508 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1510 SwitchData& data = *m_graph.m_switchData.add();
1511 data.kind = SwitchCell;
1512 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1514 BasicBlock* originBlock = m_currentBlock;
1516 dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1517 originBlock->didLink();
1518 cancelLinkingForBlock(m_inlineStackTop, originBlock);
1520 // Each inlined callee will have a landing block that it returns at. They should all have jumps
1521 // to the continuation block, which we create last.
1522 Vector<BasicBlock*> landingBlocks;
1524 // We force this to true if we give up on inlining any of the edges.
1525 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1528 dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1530 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1531 m_currentIndex = oldOffset;
1532 RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1533 m_currentBlock = block.get();
1534 m_graph.appendBlock(block);
1535 prepareToParseBlock();
1537 Node* myCallTargetNode = getDirect(calleeReg);
1539 bool inliningResult = attemptToInlineCall(
1540 myCallTargetNode, resultOperand, callLinkStatus[i].callee(), registerOffset,
1541 argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1544 if (!inliningResult) {
1545 // That failed so we let the block die. Nothing interesting should have been added to
1546 // the block. We also give up on inlining any of the (less frequent) callees.
1547 ASSERT(m_currentBlock == block.get());
1548 ASSERT(m_graph.m_blocks.last() == block);
1549 m_graph.killBlockAndItsContents(block.get());
1550 m_graph.m_blocks.removeLast();
1552 // The fact that inlining failed means we need a slow path.
1553 couldTakeSlowPath = true;
1557 JSCell* thingToCaseOn;
1558 if (allAreDirectCalls)
1559 thingToCaseOn = callLinkStatus[i].callee().nonExecutableCallee();
1561 ASSERT(allAreClosureCalls);
1562 thingToCaseOn = callLinkStatus[i].callee().executable();
1564 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1565 m_currentIndex = nextOffset;
1566 processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1569 dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1570 m_currentBlock->didLink();
1571 landingBlocks.append(m_currentBlock);
1574 dataLog("Finished inlining ", callLinkStatus[i].callee(), " at ", currentCodeOrigin(), ".\n");
1577 RefPtr<BasicBlock> slowPathBlock = adoptRef(
1578 new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1579 m_currentIndex = oldOffset;
1580 data.fallThrough = BranchTarget(slowPathBlock.get());
1581 m_graph.appendBlock(slowPathBlock);
1583 dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1584 slowPathBlock->didLink();
1585 prepareToParseBlock();
1586 m_currentBlock = slowPathBlock.get();
1587 Node* myCallTargetNode = getDirect(calleeReg);
1588 if (couldTakeSlowPath) {
1590 resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1591 registerOffset, prediction);
1593 addToGraph(CheckBadCell);
1594 addToGraph(Phantom, myCallTargetNode);
1595 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1597 set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1600 m_currentIndex = nextOffset;
1601 processSetLocalQueue();
1603 landingBlocks.append(m_currentBlock);
1605 RefPtr<BasicBlock> continuationBlock = adoptRef(
1606 new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1607 m_graph.appendBlock(continuationBlock);
1609 dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1610 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1611 prepareToParseBlock();
1612 m_currentBlock = continuationBlock.get();
1614 for (unsigned i = landingBlocks.size(); i--;)
1615 landingBlocks[i]->last()->targetBlock() = continuationBlock.get();
1617 m_currentIndex = oldOffset;
1620 dataLog("Done inlining (hard).\n");
1621 dataLog("Stack: ", currentCodeOrigin(), "\n");
1626 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1628 if (argumentCountIncludingThis == 1) { // Math.min()
1629 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1630 return true;
1631 }
1633 if (argumentCountIncludingThis == 2) { // Math.min(x)
1634 Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
1635 addToGraph(Phantom, Edge(result, NumberUse));
1636 set(VirtualRegister(resultOperand), result);
1637 return true;
1638 }
1640 if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1641 set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1642 return true;
1643 }
1645 // Don't handle >=3 arguments for now.
1646 return false;
1649 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1651 switch (intrinsic) {
1652 case AbsIntrinsic: {
1653 if (argumentCountIncludingThis == 1) { // Math.abs()
1654 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1658 if (!MacroAssembler::supportsFloatingPointAbs())
1659 return false;
1661 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1662 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1663 node->mergeFlags(NodeMayOverflowInDFG);
1664 set(VirtualRegister(resultOperand), node);
1665 return true;
1666 }
1668 case MinIntrinsic:
1669 return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1671 case MaxIntrinsic:
1672 return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1674 case SqrtIntrinsic:
1675 case CosIntrinsic:
1676 case SinIntrinsic: {
1677 if (argumentCountIncludingThis == 1) {
1678 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1682 switch (intrinsic) {
1683 case SqrtIntrinsic:
1684 if (!MacroAssembler::supportsFloatingPointSqrt())
1685 return false;
1687 set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1688 return true;
1690 case CosIntrinsic:
1691 set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1692 return true;
1694 case SinIntrinsic:
1695 set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1696 return true;
1698 default:
1699 RELEASE_ASSERT_NOT_REACHED();
1700 return false;
1701 }
1702 }
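// The array intrinsics below consult the call site's ArrayProfile; if the profiled
// shape is not a plain JSArray indexing type we decline to inline and leave the
// operation to the generic call path.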
1704 case ArrayPushIntrinsic: {
1705 if (argumentCountIncludingThis != 2)
1706 return false;
1708 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1709 if (!arrayMode.isJSArray())
1710 return false;
1711 switch (arrayMode.type()) {
1712 case Array::Undecided:
1713 case Array::Int32:
1714 case Array::Double:
1715 case Array::Contiguous:
1716 case Array::ArrayStorage: {
1717 Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1718 set(VirtualRegister(resultOperand), arrayPush);
1728 case ArrayPopIntrinsic: {
1729 if (argumentCountIncludingThis != 1)
1730 return false;
1732 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1733 if (!arrayMode.isJSArray())
1734 return false;
1735 switch (arrayMode.type()) {
1736 case Array::Int32:
1737 case Array::Double:
1738 case Array::Contiguous:
1739 case Array::ArrayStorage: {
1740 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1741 set(VirtualRegister(resultOperand), arrayPop);
1750 case CharCodeAtIntrinsic: {
1751 if (argumentCountIncludingThis != 2)
1754 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1755 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1756 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1758 set(VirtualRegister(resultOperand), charCode);
1762 case CharAtIntrinsic: {
1763 if (argumentCountIncludingThis != 2)
1766 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1767 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1768 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1770 set(VirtualRegister(resultOperand), charCode);
1773 case FromCharCodeIntrinsic: {
1774 if (argumentCountIncludingThis != 2)
1777 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1778 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
1780 set(VirtualRegister(resultOperand), charCode);
1785 case RegExpExecIntrinsic: {
1786 if (argumentCountIncludingThis != 2)
1789 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1790 set(VirtualRegister(resultOperand), regExpExec);
1795 case RegExpTestIntrinsic: {
1796 if (argumentCountIncludingThis != 2)
1799 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1800 set(VirtualRegister(resultOperand), regExpExec);
1805 case IMulIntrinsic: {
1806 if (argumentCountIncludingThis != 3)
1808 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
1809 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
1810 Node* left = get(leftOperand);
1811 Node* right = get(rightOperand);
1812 set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
1816 case FRoundIntrinsic: {
1817 if (argumentCountIncludingThis != 2)
1819 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1820 set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
1824 case DFGTrueIntrinsic: {
1825 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
1829 case OSRExitIntrinsic: {
1830 addToGraph(ForceOSRExit);
1831 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1835 case IsFinalTierIntrinsic: {
1836 set(VirtualRegister(resultOperand),
1837 jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
1841 case SetInt32HeapPredictionIntrinsic: {
1842 for (int i = 1; i < argumentCountIncludingThis; ++i) {
1843 Node* node = get(virtualRegisterForArgument(i, registerOffset));
1844 if (node->hasHeapPrediction())
1845 node->setHeapPrediction(SpecInt32);
1847 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1851 case FiatInt52Intrinsic: {
1852 if (argumentCountIncludingThis != 2)
1854 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1856 set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
1858 set(VirtualRegister(resultOperand), get(operand));
1867 bool ByteCodeParser::handleTypedArrayConstructor(
1868 int resultOperand, InternalFunction* function, int registerOffset,
1869 int argumentCountIncludingThis, TypedArrayType type)
1871 if (!isTypedView(type))
1874 if (function->classInfo() != constructorClassInfoForType(type))
1877 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1880 // We only have an intrinsic for the case where you say:
1882 // new FooArray(blah);
1884 // Of course, 'blah' could be any of the following:
1886 // - Integer, indicating that you want to allocate an array of that length.
1887 // This is the thing we're hoping for, and what we can actually do meaningful
1888 // optimizations for.
1890 // - Array buffer, indicating that you want to create a view onto that _entire_
1891 // buffer.
1893 // - Non-buffer object, indicating that you want to create a copy of that
1894 // object by pretending that it quacks like an array.
1896 // - Anything else, indicating that you want to have an exception thrown at
1897 // you.
1899 // The intrinsic, NewTypedArray, will behave as if it could do any of these
1900 // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
1901 // predicted Int32, then we lock it in as a normal typed array allocation.
1902 // Otherwise, NewTypedArray turns into a totally opaque function call that
1903 // may clobber the world - by virtue of it accessing properties on what could
1904 // be anything.
1906 // Note that although the generic form of NewTypedArray sounds sort of awful,
1907 // it is actually quite likely to be more efficient than a fully generic
1908 // Construct. So, we might want to think about making NewTypedArray variadic,
1909 // or else making Construct not super slow.
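// Illustrative example of the above: 'new Float64Array(1024)' with an Int32-predicted
// child1 gets locked in as a fast typed-array allocation at Fixup time, whereas
// 'new Float64Array(someObject)' keeps the generic, potentially world-clobbering form.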
1911 if (argumentCountIncludingThis != 2)
1914 set(VirtualRegister(resultOperand),
1915 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
1919 bool ByteCodeParser::handleConstantInternalFunction(
1920 int resultOperand, InternalFunction* function, int registerOffset,
1921 int argumentCountIncludingThis, CodeSpecializationKind kind)
1923 // If we ever find that we have a lot of internal functions that we specialize for,
1924 // then we should probably have some sort of hashtable dispatch, or maybe even
1925 // dispatch straight through the MethodTable of the InternalFunction. But for now,
1926 // it seems that this case is hit infrequently enough, and the number of functions
1927 // we know about is small enough, that having just a linear cascade of if statements
1928 // is good enough.
1930 if (function->classInfo() == ArrayConstructor::info()) {
1931 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1934 if (argumentCountIncludingThis == 2) {
1935 set(VirtualRegister(resultOperand),
1936 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
1940 for (int i = 1; i < argumentCountIncludingThis; ++i)
1941 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
1942 set(VirtualRegister(resultOperand),
1943 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
1947 if (function->classInfo() == StringConstructor::info()) {
1950 if (argumentCountIncludingThis <= 1)
1951 result = jsConstant(m_vm->smallStrings.emptyString());
1953 result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
1955 if (kind == CodeForConstruct)
1956 result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
1958 set(VirtualRegister(resultOperand), result);
1962 for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
1963 bool result = handleTypedArrayConstructor(
1964 resultOperand, function, registerOffset, argumentCountIncludingThis,
1965 indexToTypedArrayType(typeIndex));
1973 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
1975 if (base->hasConstant()) {
1976 if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
1977 addToGraph(Phantom, base);
1978 return weakJSConstant(constant);
1982 Node* propertyStorage;
1983 if (isInlineOffset(offset))
1984 propertyStorage = base;
1985 else
1986 propertyStorage = addToGraph(GetButterfly, base);
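// Inline offsets live directly in the object cell, so the base itself doubles as the
// "storage"; out-of-line offsets live in the butterfly, which we load with GetButterfly.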
1987 Node* getByOffset = addToGraph(op, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
1989 StorageAccessData storageAccessData;
1990 storageAccessData.offset = offset;
1991 storageAccessData.identifierNumber = identifierNumber;
1992 m_graph.m_storageAccessData.append(storageAccessData);
1997 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
1999 Node* propertyStorage;
2000 if (isInlineOffset(offset))
2001 propertyStorage = base;
2002 else
2003 propertyStorage = addToGraph(GetButterfly, base);
2004 Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
2006 StorageAccessData storageAccessData;
2007 storageAccessData.offset = offset;
2008 storageAccessData.identifierNumber = identifier;
2009 m_graph.m_storageAccessData.append(storageAccessData);
2014 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2016 for (unsigned i = 0; i < vector.size(); ++i)
2017 cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2020 void ByteCodeParser::handleGetById(
2021 int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2022 const GetByIdStatus& getByIdStatus)
2024 NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2026 if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
2027 set(VirtualRegister(destinationOperand),
2028 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2032 if (getByIdStatus.numVariants() > 1) {
2033 if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2034 || !Options::enablePolymorphicAccessInlining()) {
2035 set(VirtualRegister(destinationOperand),
2036 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2040 if (m_graph.compilation())
2041 m_graph.compilation()->noticeInlinedGetById();
2043 // 1) Emit prototype structure checks for all chains. This could sort of maybe not be
2044 // optimal, if there is some rarely executed case in the chain that requires a lot
2045 // of checks and those checks are not watchpointable.
2046 for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2047 emitChecks(getByIdStatus[variantIndex].constantChecks());
2049 // 2) Emit a MultiGetByOffset
2050 MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2051 data->variants = getByIdStatus.variants();
2052 data->identifierNumber = identifierNumber;
2053 set(VirtualRegister(destinationOperand),
2054 addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2055 return;
2056 }
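// From here on the status is monomorphic: a single GetByIdVariant describes the access,
// so we can emit a direct CheckStructure followed by either a GetByOffset or a getter call.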
2058 ASSERT(getByIdStatus.numVariants() == 1);
2059 GetByIdVariant variant = getByIdStatus[0];
2061 if (m_graph.compilation())
2062 m_graph.compilation()->noticeInlinedGetById();
2064 Node* originalBase = base;
2066 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2068 emitChecks(variant.constantChecks());
2070 if (variant.alternateBase())
2071 base = weakJSConstant(variant.alternateBase());
2073 // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2074 // ensure that the base of the original get_by_id is kept alive until we're done with
2075 // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2076 // on something other than the base following the CheckStructure on base.
2077 if (originalBase != base)
2078 addToGraph(Phantom, originalBase);
2080 Node* loadedValue = handleGetByOffset(
2081 variant.callLinkStatus() ? SpecCellOther : prediction,
2082 base, variant.baseStructure(), identifierNumber, variant.offset(),
2083 variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2085 if (!variant.callLinkStatus()) {
2086 set(VirtualRegister(destinationOperand), loadedValue);
2090 Node* getter = addToGraph(GetGetter, loadedValue);
2092 // Make a call. We don't try to get fancy with using the smallest operand number because
2093 // the stack layout phase should compress the stack anyway.
2095 unsigned numberOfParameters = 0;
2096 numberOfParameters++; // The 'this' argument.
2097 numberOfParameters++; // True return PC.
2099 // Start with a register offset that corresponds to the last in-use register.
2100 int registerOffset = virtualRegisterForLocal(
2101 m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2102 registerOffset -= numberOfParameters;
2103 registerOffset -= JSStack::CallFrameHeaderSize;
2105 // Get the alignment right.
2106 registerOffset = -WTF::roundUpToMultipleOf(
2107 stackAlignmentRegisters(),
2108 -registerOffset);
2110 ensureLocals(
2111 m_inlineStackTop->remapOperand(
2112 VirtualRegister(registerOffset)).toLocal());
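// Illustrative (assuming a 64-bit target, where stackAlignmentRegisters() == 2): if the
// arguments and header leave registerOffset at -13, this computes
// -roundUpToMultipleOf(2, 13) == -14, keeping the inlined frame 16-byte aligned.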
2114 // Issue SetLocals. This has two effects:
2115 // 1) That's how handleCall() sees the arguments.
2116 // 2) If we inline then this ensures that the arguments are flushed so that if you use
2117 // the dreaded arguments object on the getter, the right things happen. Well, sort of -
2118 // since we only really care about 'this' in this case. But we're not going to take that
2120 int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2121 set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2123 handleCall(
2124 destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2125 getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
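// The getter runs as an ordinary Call with an InlineCallFrame::GetterCall frame, so
// handleCall() is free to inline the getter body itself if profiling permits.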
2128 void ByteCodeParser::emitPutById(
2129 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2131 if (isDirect)
2132 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2133 else
2134 addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2137 void ByteCodeParser::handlePutById(
2138 Node* base, unsigned identifierNumber, Node* value,
2139 const PutByIdStatus& putByIdStatus, bool isDirect)
2141 if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
2142 if (!putByIdStatus.isSet())
2143 addToGraph(ForceOSRExit);
2144 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2148 if (putByIdStatus.numVariants() > 1) {
2149 if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2150 || !Options::enablePolymorphicAccessInlining()) {
2151 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2155 if (m_graph.compilation())
2156 m_graph.compilation()->noticeInlinedPutById();
2159 for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2160 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2161 continue;
2162 emitChecks(putByIdStatus[variantIndex].constantChecks());
2166 MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2167 data->variants = putByIdStatus.variants();
2168 data->identifierNumber = identifierNumber;
2169 addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2173 ASSERT(putByIdStatus.numVariants() == 1);
2174 const PutByIdVariant& variant = putByIdStatus[0];
2176 switch (variant.kind()) {
2177 case PutByIdVariant::Replace: {
2178 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2179 handlePutByOffset(base, identifierNumber, variant.offset(), value);
2180 if (m_graph.compilation())
2181 m_graph.compilation()->noticeInlinedPutById();
2185 case PutByIdVariant::Transition: {
2186 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2187 emitChecks(variant.constantChecks());
2189 ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2191 Node* propertyStorage;
2192 Transition* transition = m_graph.m_transitions.add(
2193 variant.oldStructureForTransition(), variant.newStructure());
2195 if (variant.reallocatesStorage()) {
2197 // If we're growing the property storage then it must be because we're
2198 // storing into the out-of-line storage.
2199 ASSERT(!isInlineOffset(variant.offset()));
2201 if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2202 propertyStorage = addToGraph(
2203 AllocatePropertyStorage, OpInfo(transition), base);
2204 } else {
2205 propertyStorage = addToGraph(
2206 ReallocatePropertyStorage, OpInfo(transition),
2207 base, addToGraph(GetButterfly, base));
2208 }
2209 } else {
2210 if (isInlineOffset(variant.offset()))
2211 propertyStorage = base;
2212 else
2213 propertyStorage = addToGraph(GetButterfly, base);
2214 }
2216 addToGraph(PutStructure, OpInfo(transition), base);
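// Ordering matters here: the butterfly was (re)allocated above if needed, the structure
// is switched by the PutStructure just emitted, and the value lands at its new offset
// via the PutByOffset below.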
2218 addToGraph(
2219 PutByOffset,
2220 OpInfo(m_graph.m_storageAccessData.size()),
2221 propertyStorage,
2222 base,
2223 value);
2225 StorageAccessData storageAccessData;
2226 storageAccessData.offset = variant.offset();
2227 storageAccessData.identifierNumber = identifierNumber;
2228 m_graph.m_storageAccessData.append(storageAccessData);
2230 if (m_graph.compilation())
2231 m_graph.compilation()->noticeInlinedPutById();
2235 case PutByIdVariant::Setter: {
2236 Node* originalBase = base;
2238 addToGraph(
2239 CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2241 emitChecks(variant.constantChecks());
2243 if (variant.alternateBase())
2244 base = weakJSConstant(variant.alternateBase());
2246 Node* loadedValue = handleGetByOffset(
2247 SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2248 GetGetterSetterByOffset);
2250 Node* setter = addToGraph(GetSetter, loadedValue);
2252 // Make a call. We don't try to get fancy with using the smallest operand number because
2253 // the stack layout phase should compress the stack anyway.
2255 unsigned numberOfParameters = 0;
2256 numberOfParameters++; // The 'this' argument.
2257 numberOfParameters++; // The new value.
2258 numberOfParameters++; // True return PC.
2260 // Start with a register offset that corresponds to the last in-use register.
2261 int registerOffset = virtualRegisterForLocal(
2262 m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2263 registerOffset -= numberOfParameters;
2264 registerOffset -= JSStack::CallFrameHeaderSize;
2266 // Get the alignment right.
2267 registerOffset = -WTF::roundUpToMultipleOf(
2268 stackAlignmentRegisters(),
2272 m_inlineStackTop->remapOperand(
2273 VirtualRegister(registerOffset)).toLocal());
2275 int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2276 set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2277 set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2279 handleCall(
2280 VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2281 OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2282 *variant.callLinkStatus(), SpecOther);
2283 return;
2284 }
2286 default:
2287 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2292 void ByteCodeParser::prepareToParseBlock()
2295 ASSERT(m_setLocalQueue.isEmpty());
2298 void ByteCodeParser::clearCaches()
2300 m_constants.resize(0);
2303 Node* ByteCodeParser::getScope(unsigned skipCount)
2305 Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
2306 for (unsigned n = skipCount; n--;)
2307 localBase = addToGraph(SkipScope, localBase);
2311 bool ByteCodeParser::parseBlock(unsigned limit)
2313 bool shouldContinueParsing = true;
2315 Interpreter* interpreter = m_vm->interpreter;
2316 Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2317 unsigned blockBegin = m_currentIndex;
2319 // If we are the first basic block, introduce markers for arguments. This allows
2320 // us to track if a use of an argument may use the actual argument passed, as
2321 // opposed to using a value we set explicitly.
2322 if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2323 m_graph.m_arguments.resize(m_numArguments);
2324 for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2325 VariableAccessData* variable = newVariableAccessData(
2326 virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
2327 variable->mergeStructureCheckHoistingFailed(
2328 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2329 variable->mergeCheckArrayHoistingFailed(
2330 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2332 Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2333 m_graph.m_arguments[argument] = setArgument;
2334 m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2338 while (true) {
2339 processSetLocalQueue();
2341 // Don't extend over jump destinations.
2342 if (m_currentIndex == limit) {
2343 // Ordinarily we want to plant a jump. But refuse to do this if the block is
2344 // empty. This is a special case for inlining, which might otherwise create
2345 // some empty blocks in some cases. When parseBlock() returns with an empty
2346 // block, it will get repurposed instead of creating a new one. Note that this
2347 // logic relies on every bytecode resulting in one or more nodes, which would
2348 // be true anyway except for op_loop_hint, which emits a Phantom to force this
2350 if (!m_currentBlock->isEmpty())
2351 addToGraph(Jump, OpInfo(m_currentIndex));
2352 return shouldContinueParsing;
2355 // Switch on the current bytecode opcode.
2356 Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2357 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2358 OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2360 if (Options::verboseDFGByteCodeParsing())
2361 dataLog(" parsing ", currentCodeOrigin(), "\n");
2363 if (m_graph.compilation()) {
2364 addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2365 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2366 }
2368 switch (opcodeID) {
2370 // === Function entry opcodes ===
2372 case op_enter: {
2373 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2374 // Initialize all locals to undefined.
2375 for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2376 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2377 if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
2378 set(virtualRegisterForArgument(0), undefined, ImmediateNakedSet);
2379 NEXT_OPCODE(op_enter);
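// Roughly: NEXT_OPCODE advances m_currentIndex past the instruction and continues the
// dispatch loop, while LAST_OPCODE does the same bookkeeping but returns out of
// parseBlock(), ending the current basic block (jumps, returns, throws, switches).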
2382 case op_touch_entry:
2383 if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
2384 addToGraph(ForceOSRExit);
2385 NEXT_OPCODE(op_touch_entry);
2387 case op_to_this: {
2388 Node* op1 = getThis();
2389 if (op1->op() != ToThis) {
2390 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2391 if (currentInstruction[2].u.toThisStatus != ToThisOK
2392 || !cachedStructure
2393 || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2394 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2395 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2396 || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2397 setThis(addToGraph(ToThis, op1));
2398 } else {
2399 addToGraph(
2400 CheckStructure,
2401 OpInfo(m_graph.addStructureSet(cachedStructure)),
2402 op1);
2403 }
2404 }
2405 NEXT_OPCODE(op_to_this);
2408 case op_create_this: {
2409 int calleeOperand = currentInstruction[2].u.operand;
2410 Node* callee = get(VirtualRegister(calleeOperand));
2411 bool alreadyEmitted = false;
2412 if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2413 if (Structure* structure = function->allocationStructure()) {
2414 addToGraph(AllocationProfileWatchpoint, OpInfo(m_graph.freeze(function)));
2415 // The callee is still live up to this point.
2416 addToGraph(Phantom, callee);
2417 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2418 alreadyEmitted = true;
2421 if (!alreadyEmitted) {
2422 set(VirtualRegister(currentInstruction[1].u.operand),
2423 addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2425 NEXT_OPCODE(op_create_this);
2428 case op_new_object: {
2429 set(VirtualRegister(currentInstruction[1].u.operand),
2430 addToGraph(NewObject,
2431 OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2432 NEXT_OPCODE(op_new_object);
2435 case op_new_array: {
2436 int startOperand = currentInstruction[2].u.operand;
2437 int numOperands = currentInstruction[3].u.operand;
2438 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2439 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2440 addVarArgChild(get(VirtualRegister(operandIdx)));
2441 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2442 NEXT_OPCODE(op_new_array);
2445 case op_new_array_with_size: {
2446 int lengthOperand = currentInstruction[2].u.operand;
2447 ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2448 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2449 NEXT_OPCODE(op_new_array_with_size);
2452 case op_new_array_buffer: {
2453 int startConstant = currentInstruction[2].u.operand;
2454 int numConstants = currentInstruction[3].u.operand;
2455 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2456 NewArrayBufferData data;
2457 data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2458 data.numConstants = numConstants;
2459 data.indexingType = profile->selectIndexingType();
2461 // If this statement has never executed, we'll have the wrong indexing type in the profile.
2462 for (int i = 0; i < numConstants; ++i) {
2463 data.indexingType =
2464 leastUpperBoundOfIndexingTypeAndValue(
2465 data.indexingType,
2466 m_codeBlock->constantBuffer(data.startConstant)[i]);
2467 }
2469 m_graph.m_newArrayBufferData.append(data);
2470 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2471 NEXT_OPCODE(op_new_array_buffer);
2474 case op_new_regexp: {
2475 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2476 NEXT_OPCODE(op_new_regexp);
2479 case op_get_callee: {
2480 JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
2481 if (!cachedFunction
2482 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2483 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
2484 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
2485 } else {
2486 FrozenValue* frozen = m_graph.freeze(cachedFunction);
2487 ASSERT(cachedFunction->inherits(JSFunction::info()));
2488 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
2489 addToGraph(CheckCell, OpInfo(frozen), actualCallee);
2490 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2491 }
2492 NEXT_OPCODE(op_get_callee);
2495 // === Bitwise operations ===
2497 case op_bitand: {
2498 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2499 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2500 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2501 NEXT_OPCODE(op_bitand);
2505 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2506 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2507 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2508 NEXT_OPCODE(op_bitor);
2512 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2513 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2514 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2515 NEXT_OPCODE(op_bitxor);
2519 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2520 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2521 set(VirtualRegister(currentInstruction[1].u.operand),
2522 addToGraph(BitRShift, op1, op2));
2523 NEXT_OPCODE(op_rshift);
2527 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2528 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2529 set(VirtualRegister(currentInstruction[1].u.operand),
2530 addToGraph(BitLShift, op1, op2));
2531 NEXT_OPCODE(op_lshift);
2535 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2536 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2537 set(VirtualRegister(currentInstruction[1].u.operand),
2538 addToGraph(BitURShift, op1, op2));
2539 NEXT_OPCODE(op_urshift);
2543 set(VirtualRegister(currentInstruction[1].u.operand),
2544 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2545 NEXT_OPCODE(op_unsigned);
2548 // === Increment/Decrement opcodes ===
2550 case op_inc: {
2551 int srcDst = currentInstruction[1].u.operand;
2552 VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2553 Node* op = get(srcDstVirtualRegister);
2554 set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2555 NEXT_OPCODE(op_inc);
2559 int srcDst = currentInstruction[1].u.operand;
2560 VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2561 Node* op = get(srcDstVirtualRegister);
2562 set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2563 NEXT_OPCODE(op_dec);
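// makeSafe(), used above and in the arithmetic cases below, consults the exit profile
// and value profiling for this bytecode and, if overflow or negative-zero trouble has
// been seen, flags the node so later phases don't speculate on the failing checks again.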
2566 // === Arithmetic operations ===
2568 case op_add: {
2569 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2570 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2571 if (op1->hasNumberResult() && op2->hasNumberResult())
2572 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2574 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2575 NEXT_OPCODE(op_add);
2579 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2580 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2581 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2582 NEXT_OPCODE(op_sub);
2586 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2587 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2588 NEXT_OPCODE(op_negate);
2592 // Multiply requires that the inputs are not truncated, unfortunately.
2593 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2594 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2595 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2596 NEXT_OPCODE(op_mul);
2600 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2601 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2602 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2603 NEXT_OPCODE(op_mod);
2607 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2608 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2609 set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2610 NEXT_OPCODE(op_div);
2613 // === Misc operations ===
2616 addToGraph(Breakpoint);
2617 NEXT_OPCODE(op_debug);
2619 case op_profile_will_call: {
2620 addToGraph(ProfileWillCall);
2621 NEXT_OPCODE(op_profile_will_call);
2624 case op_profile_did_call: {
2625 addToGraph(ProfileDidCall);
2626 NEXT_OPCODE(op_profile_did_call);
2630 Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2631 set(VirtualRegister(currentInstruction[1].u.operand), op);
2632 NEXT_OPCODE(op_mov);
2635 case op_captured_mov: {
2636 Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2637 if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
2638 if (set->state() != IsInvalidated)
2639 addToGraph(NotifyWrite, OpInfo(set), op);
2641 set(VirtualRegister(currentInstruction[1].u.operand), op);
2642 NEXT_OPCODE(op_captured_mov);
2645 case op_check_has_instance:
2646 addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2647 NEXT_OPCODE(op_check_has_instance);
2649 case op_instanceof: {
2650 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2651 Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2652 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2653 NEXT_OPCODE(op_instanceof);
2656 case op_is_undefined: {
2657 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2658 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2659 NEXT_OPCODE(op_is_undefined);
2662 case op_is_boolean: {
2663 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2664 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2665 NEXT_OPCODE(op_is_boolean);
2668 case op_is_number: {
2669 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2670 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2671 NEXT_OPCODE(op_is_number);
2674 case op_is_string: {
2675 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2676 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2677 NEXT_OPCODE(op_is_string);
2680 case op_is_object: {
2681 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2682 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2683 NEXT_OPCODE(op_is_object);
2686 case op_is_function: {
2687 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2688 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2689 NEXT_OPCODE(op_is_function);
2693 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2694 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2695 NEXT_OPCODE(op_not);
2698 case op_to_primitive: {
2699 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2700 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2701 NEXT_OPCODE(op_to_primitive);
2705 int startOperand = currentInstruction[2].u.operand;
2706 int numOperands = currentInstruction[3].u.operand;
2707 #if CPU(X86)
2708 // X86 doesn't have enough registers to compile MakeRope with three arguments.
2709 // Rather than try to be clever, we just make MakeRope dumber on this processor.
2710 const unsigned maxRopeArguments = 2;
2711 #else
2712 const unsigned maxRopeArguments = 3;
2713 #endif
2714 auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2715 for (int i = 0; i < numOperands; i++)
2716 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2718 for (int i = 0; i < numOperands; i++)
2719 addToGraph(Phantom, toStringNodes[i]);
2721 Node* operands[AdjacencyList::Size];
2722 unsigned indexInOperands = 0;
2723 for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2724 operands[i] = 0;
2725 for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2726 if (indexInOperands == maxRopeArguments) {
2727 operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2728 for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2729 operands[i] = 0;
2730 indexInOperands = 1;
2731 }
2733 ASSERT(indexInOperands < AdjacencyList::Size);
2734 ASSERT(indexInOperands < maxRopeArguments);
2735 operands[indexInOperands++] = toStringNodes[operandIdx];
2737 set(VirtualRegister(currentInstruction[1].u.operand),
2738 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2739 NEXT_OPCODE(op_strcat);
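// The folding loop above chunks the operands, so e.g. a five-way concatenation becomes
// MakeRope(MakeRope(s1, s2, s3), s4, s5) when maxRopeArguments is 3.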
2743 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2744 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2745 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2746 NEXT_OPCODE(op_less);
2750 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2751 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2752 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
2753 NEXT_OPCODE(op_lesseq);
2757 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2758 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2759 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
2760 NEXT_OPCODE(op_greater);
2763 case op_greatereq: {
2764 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2765 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2766 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
2767 NEXT_OPCODE(op_greatereq);
2771 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2772 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2773 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
2778 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2779 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
2780 NEXT_OPCODE(op_eq_null);
2784 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2785 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2786 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
2787 NEXT_OPCODE(op_stricteq);
2791 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2792 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2793 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2794 NEXT_OPCODE(op_neq);
2798 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2799 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
2800 NEXT_OPCODE(op_neq_null);
2803 case op_nstricteq: {
2804 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2805 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2806 Node* invertedResult;
2807 invertedResult = addToGraph(CompareStrictEq, op1, op2);
2808 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
2809 NEXT_OPCODE(op_nstricteq);
2812 // === Property access operations ===
2814 case op_get_by_val: {
2815 SpeculatedType prediction = getPredictionWithoutOSRExit();
2817 Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2818 ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
2819 Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
2820 Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2821 set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
2823 NEXT_OPCODE(op_get_by_val);
2826 case op_put_by_val_direct:
2827 case op_put_by_val: {
2828 Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2830 ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
2832 Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
2833 Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2835 addVarArgChild(base);
2836 addVarArgChild(property);
2837 addVarArgChild(value);
2838 addVarArgChild(0); // Leave room for property storage.
2839 addVarArgChild(0); // Leave room for length.
2840 addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2842 NEXT_OPCODE(op_put_by_val);
2846 case op_get_by_id_out_of_line:
2847 case op_get_array_length: {
2848 SpeculatedType prediction = getPrediction();
2850 Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2851 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2853 StringImpl* uid = m_graph.identifiers()[identifierNumber];
2854 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2855 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2856 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2857 currentCodeOrigin(), uid);
2860 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2862 NEXT_OPCODE(op_get_by_id);
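// GetByIdStatus aggregates what the baseline JIT's inline caches learned about this
// access; handleGetById() above turns a "simple" status into checked GetByOffset /
// MultiGetByOffset nodes and otherwise falls back to a generic GetById(Flush).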
2865 case op_put_by_id_out_of_line:
2866 case op_put_by_id_transition_direct:
2867 case op_put_by_id_transition_normal:
2868 case op_put_by_id_transition_direct_out_of_line:
2869 case op_put_by_id_transition_normal_out_of_line: {
2870 Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2871 Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2872 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2873 bool direct = currentInstruction[8].u.operand;
2875 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2876 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2877 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2878 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
2880 handlePutById(base, identifierNumber, value, putByIdStatus, direct);
2881 NEXT_OPCODE(op_put_by_id);
2884 case op_init_global_const_nop: {
2885 NEXT_OPCODE(op_init_global_const_nop);
2888 case op_init_global_const: {
2889 Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2890 addToGraph(
2891 PutGlobalVar,
2892 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2893 value);
2894 NEXT_OPCODE(op_init_global_const);
2897 // === Block terminators. ===
2899 case op_jmp: {
2900 int relativeOffset = currentInstruction[1].u.operand;
2901 if (relativeOffset <= 0)
2902 flushForTerminal();
2903 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2904 LAST_OPCODE(op_jmp);
2908 unsigned relativeOffset = currentInstruction[2].u.operand;
2909 Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2910 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
2911 LAST_OPCODE(op_jtrue);
2915 unsigned relativeOffset = currentInstruction[2].u.operand;
2916 Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2917 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
2918 LAST_OPCODE(op_jfalse);
2922 unsigned relativeOffset = currentInstruction[2].u.operand;
2923 Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2924 Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2925 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
2926 LAST_OPCODE(op_jeq_null);
2929 case op_jneq_null: {
2930 unsigned relativeOffset = currentInstruction[2].u.operand;
2931 Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2932 Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2933 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
2934 LAST_OPCODE(op_jneq_null);
2938 unsigned relativeOffset = currentInstruction[3].u.operand;
2939 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2940 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2941 Node* condition = addToGraph(CompareLess, op1, op2);
2942 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
2943 LAST_OPCODE(op_jless);
2947 unsigned relativeOffset = currentInstruction[3].u.operand;
2948 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2949 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2950 Node* condition = addToGraph(CompareLessEq, op1, op2);
2951 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
2952 LAST_OPCODE(op_jlesseq);
2956 unsigned relativeOffset = currentInstruction[3].u.operand;
2957 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2958 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2959 Node* condition = addToGraph(CompareGreater, op1, op2);
2960 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
2961 LAST_OPCODE(op_jgreater);
2964 case op_jgreatereq: {
2965 unsigned relativeOffset = currentInstruction[3].u.operand;
2966 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2967 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2968 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2969 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
2970 LAST_OPCODE(op_jgreatereq);
2974 unsigned relativeOffset = currentInstruction[3].u.operand;
2975 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2976 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2977 Node* condition = addToGraph(CompareLess, op1, op2);
2978 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
2979 LAST_OPCODE(op_jnless);
2983 unsigned relativeOffset = currentInstruction[3].u.operand;
2984 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2985 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2986 Node* condition = addToGraph(CompareLessEq, op1, op2);
2987 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
2988 LAST_OPCODE(op_jnlesseq);
2991 case op_jngreater: {
2992 unsigned relativeOffset = currentInstruction[3].u.operand;
2993 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2994 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2995 Node* condition = addToGraph(CompareGreater, op1, op2);
2996 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
2997 LAST_OPCODE(op_jngreater);
3000 case op_jngreatereq: {
3001 unsigned relativeOffset = currentInstruction[3].u.operand;
3002 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3003 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3004 Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3005 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
3006 LAST_OPCODE(op_jngreatereq);
3009 case op_switch_imm: {
3010 SwitchData& data = *m_graph.m_switchData.add();
3011 data.kind = SwitchImm;
3012 data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3013 data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3014 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3015 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3016 if (!table.branchOffsets[i])
3018 unsigned target = m_currentIndex + table.branchOffsets[i];
3019 if (target == data.fallThrough.bytecodeIndex())
3021 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
3023 flushIfTerminal(data);
3024 addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3025 LAST_OPCODE(op_switch_imm);
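// Cases whose target coincides with the fall-through destination were skipped above,
// so the Switch node only carries genuinely distinct branch targets.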
3028 case op_switch_char: {
3029 SwitchData& data = *m_graph.m_switchData.add();
3030 data.kind = SwitchChar;
3031 data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3032 data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3033 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3034 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3035 if (!table.branchOffsets[i])
3037 unsigned target = m_currentIndex + table.branchOffsets[i];
3038 if (target == data.fallThrough.bytecodeIndex())
3041 SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
3043 flushIfTerminal(data);
3044 addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3045 LAST_OPCODE(op_switch_char);
3048 case op_switch_string: {
3049 SwitchData& data = *m_graph.m_switchData.add();
3050 data.kind = SwitchString;
3051 data.switchTableIndex = currentInstruction[1].u.operand;
3052 data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3053 StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
3054 StringJumpTable::StringOffsetTable::iterator iter;
3055 StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
3056 for (iter = table.offsetTable.begin(); iter != end; ++iter) {
3057 unsigned target = m_currentIndex + iter->value.branchOffset;
3058 if (target == data.fallThrough.bytecodeIndex())
3061 SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
3063 flushIfTerminal(data);
3064 addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3065 LAST_OPCODE(op_switch_string);
3068 case op_ret:
3069 flushForReturn();
3070 if (inlineCallFrame()) {
3071 if (m_inlineStackTop->m_returnValue.isValid())
3072 setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
3073 m_inlineStackTop->m_didReturn = true;
3074 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
3075 // If we're returning from the first block, then we're done parsing.
3076 ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
3077 shouldContinueParsing = false;
3078 LAST_OPCODE(op_ret);
3080 // If inlining created blocks, and we're doing a return, then we need some
3081 // special linking.
3082 ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3083 m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
3085 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
3086 ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
3087 addToGraph(Jump, OpInfo(0));
3088 m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
3089 m_inlineStackTop->m_didEarlyReturn = true;
3091 LAST_OPCODE(op_ret);
3093 addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3094 LAST_OPCODE(op_ret);
3096 case op_end:
3097 flushForReturn();
3098 ASSERT(!inlineCallFrame());
3099 addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3100 LAST_OPCODE(op_end);
3102 case op_throw:
3103 addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
3104 flushForTerminal();
3105 addToGraph(Unreachable);
3106 LAST_OPCODE(op_throw);
3108 case op_throw_static_error:
3109 addToGraph(ThrowReferenceError);
3111 addToGraph(Unreachable);
3112 LAST_OPCODE(op_throw_static_error);
3114 case op_call:
3115 handleCall(currentInstruction, Call, CodeForCall);
3116 NEXT_OPCODE(op_call);
3118 case op_construct:
3119 handleCall(currentInstruction, Construct, CodeForConstruct);
3120 NEXT_OPCODE(op_construct);
3122 case op_call_varargs: {
3123 int result = currentInstruction[1].u.operand;
3124 int callee = currentInstruction[2].u.operand;
3125 int thisReg = currentInstruction[3].u.operand;
3126 int arguments = currentInstruction[4].u.operand;
3127 int firstFreeReg = currentInstruction[5].u.operand;
3129 ASSERT(inlineCallFrame());
3130 ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
3131 ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
3133 addToGraph(CheckArgumentsNotCreated);
3135 unsigned argCount = inlineCallFrame()->arguments.size();
3137 // Let's compute the register offset. We start with the last used register, and
3138 // then adjust for the things we want in the call frame.
3139 int registerOffset = firstFreeReg + 1;
3140 registerOffset -= argCount; // We will be passing some arguments.
3141 registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
3143 // Get the alignment right.
3144 registerOffset = -WTF::roundUpToMultipleOf(
3145 stackAlignmentRegisters(),
3146 -registerOffset);
3148 ensureLocals(
3149 m_inlineStackTop->remapOperand(
3150 VirtualRegister(registerOffset)).toLocal());
3152 // The bytecode wouldn't have set up the arguments. But we'll do it and make it
3153 // look like the bytecode had done it.
3154 int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
3155 set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
3156 for (unsigned argument = 1; argument < argCount; ++argument)
3157 set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
3159 handleCall(
3160 result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
3161 callee, argCount, registerOffset);
3162 NEXT_OPCODE(op_call_varargs);
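// In this version, op_call_varargs only reaches the DFG when an inlined callee forwards
// its own arguments object, so the argument count is known statically and the varargs
// call can be lowered to an ordinary Call with explicitly materialized arguments.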
3165 case op_jneq_ptr:
3166 // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
3167 // support simmer for a while before making it more general, since it's
3168 // already gnarly enough as it is.
3169 ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
3170 addToGraph(
3171 CheckCell,
3172 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
3173 m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
3174 get(VirtualRegister(currentInstruction[1].u.operand)));
3175 addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
3176 LAST_OPCODE(op_jneq_ptr);

    case op_resolve_scope: {
        int dst = currentInstruction[1].u.operand;
        ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
        unsigned depth = currentInstruction[4].u.operand;

        // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
        if (needsVarInjectionChecks(resolveType))
            addToGraph(VarInjectionWatchpoint);
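
        // Resolving the scope can usually be constant-folded: for the Global* resolve types
        // the scope is just the global object, and for closure variables we can use the
        // activation itself as long as its function has only been entered once (guarded by
        // the FunctionReentryWatchpoint below). Otherwise we walk the scope chain via getScope().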
        switch (resolveType) {
        case GlobalProperty:
        case GlobalVar:
        case GlobalPropertyWithVarInjectionChecks:
        case GlobalVarWithVarInjectionChecks:
            set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
            break;
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks: {
            JSActivation* activation = currentInstruction[5].u.activation.get();
            if (activation
                && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
                addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
                set(VirtualRegister(dst), weakJSConstant(activation));
                break;
            }
            set(VirtualRegister(dst), getScope(depth));
            break;
        }
        case Dynamic:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        NEXT_OPCODE(op_resolve_scope);
    }

    case op_get_from_scope: {
        int dst = currentInstruction[1].u.operand;
        int scope = currentInstruction[2].u.operand;
        unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
        StringImpl* uid = m_graph.identifiers()[identifierNumber];
        ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();

        Structure* structure = 0;
        WatchpointSet* watchpoints = 0;
        uintptr_t operand;
        {
            ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
            if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
                watchpoints = currentInstruction[5].u.watchpointSet;
            else
                structure = currentInstruction[5].u.structure.get();
            operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
        }

        UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode.

        JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
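
        // Three strategies, depending on how the variable resolved:
        // - GlobalProperty: treat it like a get_by_id on the global object, constant-folding
        //   to a by-offset load when the GetByIdStatus is a single simple variant, and
        //   falling back to GetByIdFlush otherwise.
        // - GlobalVar: if the variable's watchpoint set has an inferred value, fold to that
        //   constant and register a VariableWatchpoint; otherwise emit GetGlobalVar.
        // - ClosureVar: similarly try to fold via the activation's symbol table watchpoints,
        //   falling back to GetClosureVar.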
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            SpeculatedType prediction = getPrediction();
            GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
            if (status.state() != GetByIdStatus::Simple
                || status.numVariants() != 1
                || status[0].structureSet().size() != 1) {
                set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
                break;
            }
            Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
            addToGraph(Phantom, get(VirtualRegister(scope)));
            set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks: {
            addToGraph(Phantom, get(VirtualRegister(scope)));
            SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
            VariableWatchpointSet* watchpointSet = entry.watchpointSet();
            JSValue inferredValue =
                watchpointSet ? watchpointSet->inferredValue() : JSValue();
            if (!inferredValue) {
                SpeculatedType prediction = getPrediction();
                set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
                break;
            }

            addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
            set(VirtualRegister(dst), weakJSConstant(inferredValue));
            break;
        }
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks: {
            Node* scopeNode = get(VirtualRegister(scope));
            if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
                SymbolTable* symbolTable = activation->symbolTable();
                ConcurrentJITLocker locker(symbolTable->m_lock);
                SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
                ASSERT(iter != symbolTable->end(locker));
                VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
                if (watchpointSet) {
                    if (JSValue value = watchpointSet->inferredValue()) {
                        addToGraph(Phantom, scopeNode);
                        addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
                        set(VirtualRegister(dst), weakJSConstant(value));
                        break;
                    }
                }
            }
            SpeculatedType prediction = getPrediction();
            set(VirtualRegister(dst),
                addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction),
                    addToGraph(GetClosureRegisters, scopeNode)));
            break;
        }
        case Dynamic:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        NEXT_OPCODE(op_get_from_scope);
    }

    case op_put_to_scope: {
        unsigned scope = currentInstruction[1].u.operand;
        unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
        unsigned value = currentInstruction[3].u.operand;
        ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
        StringImpl* uid = m_graph.identifiers()[identifierNumber];

        Structure* structure = 0;
        VariableWatchpointSet* watchpoints = 0;
        uintptr_t operand;
        {
            ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
            if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
                watchpoints = currentInstruction[5].u.watchpointSet;
            else
                structure = currentInstruction[5].u.structure.get();
            operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
        }

        JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
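
        // Stores mirror the loads above: GlobalProperty puts are constant-folded to a direct
        // by-offset store when the PutByIdStatus is a single Replace variant (otherwise we
        // emit a generic PutById); GlobalVar puts must notify the variable's watchpoint set
        // via NotifyWrite unless it is already invalidated; ClosureVar puts write through the
        // activation's register array.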
        switch (resolveType) {
        case GlobalProperty:
        case GlobalPropertyWithVarInjectionChecks: {
            PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
            if (status.numVariants() != 1
                || status[0].kind() != PutByIdVariant::Replace
                || status[0].structure().size() != 1) {
                addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
                break;
            }
            ASSERT(status[0].structure().onlyStructure() == structure);
            Node* base = cellConstantWithStructureCheck(globalObject, structure);
            addToGraph(Phantom, get(VirtualRegister(scope)));
            handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
            // Keep scope alive until after put.
            addToGraph(Phantom, get(VirtualRegister(scope)));
            break;
        }
        case GlobalVar:
        case GlobalVarWithVarInjectionChecks: {
            SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
            ASSERT(watchpoints == entry.watchpointSet());
            Node* valueNode = get(VirtualRegister(value));
            addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
            if (watchpoints->state() != IsInvalidated)
                addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
            // Keep scope alive until after put.
            addToGraph(Phantom, get(VirtualRegister(scope)));
            break;
        }
        case ClosureVar:
        case ClosureVarWithVarInjectionChecks: {
            Node* scopeNode = get(VirtualRegister(scope));
            Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
            addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
            break;
        }
        case Dynamic:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        NEXT_OPCODE(op_put_to_scope);
    }

    case op_loop_hint: {
        // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
        // OSR can only happen at basic block boundaries. Assert that these two statements
        // are compatible.
        RELEASE_ASSERT(m_currentIndex == blockBegin);

        // We never do OSR into an inlined code block. That could not happen, since OSR
        // looks up the code block that is the replacement for the baseline JIT code
        // block. Hence, machine code block = true code block = not inline code block.
        if (!m_inlineStackTop->m_caller)
            m_currentBlock->isOSRTarget = true;

        addToGraph(LoopHint);

        if (m_vm->watchdog && m_vm->watchdog->isEnabled())
            addToGraph(CheckWatchdogTimer);

        NEXT_OPCODE(op_loop_hint);
    }

    case op_init_lazy_reg: {
        set(VirtualRegister(currentInstruction[1].u.operand), jsConstant(JSValue()));
        ASSERT(operandIsLocal(currentInstruction[1].u.operand));
        m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
        NEXT_OPCODE(op_init_lazy_reg);
    }

    case op_create_activation: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand))));
        NEXT_OPCODE(op_create_activation);
    }

    case op_create_arguments: {
        m_graph.m_hasArguments = true;
        Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
        set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
        set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
        NEXT_OPCODE(op_create_arguments);
    }

    case op_tear_off_activation: {
        addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand)));
        NEXT_OPCODE(op_tear_off_activation);
    }

    case op_tear_off_arguments: {
        m_graph.m_hasArguments = true;
        addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand)));
        NEXT_OPCODE(op_tear_off_arguments);
    }

    case op_get_arguments_length: {
        m_graph.m_hasArguments = true;
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
        NEXT_OPCODE(op_get_arguments_length);
    }

    case op_get_argument_by_val: {
        m_graph.m_hasArguments = true;
        set(VirtualRegister(currentInstruction[1].u.operand),
            addToGraph(
                GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
                get(VirtualRegister(currentInstruction[3].u.operand))));
        NEXT_OPCODE(op_get_argument_by_val);
    }

    case op_new_func: {
        if (!currentInstruction[3].u.operand) {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
        } else {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(
                    NewFunction,
                    OpInfo(currentInstruction[2].u.operand),
                    get(VirtualRegister(currentInstruction[1].u.operand))));
        }
        NEXT_OPCODE(op_new_func);
    }

    case op_new_captured_func: {
        Node* function = addToGraph(
            NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand));
        if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet)
            addToGraph(NotifyWrite, OpInfo(set), function);
        set(VirtualRegister(currentInstruction[1].u.operand), function);
        NEXT_OPCODE(op_new_captured_func);
    }

    case op_new_func_exp: {
        set(VirtualRegister(currentInstruction[1].u.operand),
            addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
        NEXT_OPCODE(op_new_func_exp);
    }

    case op_typeof: {
        set(VirtualRegister(currentInstruction[1].u.operand),
            addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
        NEXT_OPCODE(op_typeof);
    }

    case op_to_number: {
        Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
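        // There is no explicit conversion node here: the Phantom with a NumberUse edge makes
        // the DFG speculate (and OSR exit if necessary) that the value is already a number,
        // so we can simply forward the original node to the destination.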
        addToGraph(Phantom, Edge(node, NumberUse));
        set(VirtualRegister(currentInstruction[1].u.operand), node);
        NEXT_OPCODE(op_to_number);
    }

    case op_in: {
        set(VirtualRegister(currentInstruction[1].u.operand),
            addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
        NEXT_OPCODE(op_in);
    }
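
    // The opcodes below implement for-in style property enumeration. Each one maps more or
    // less directly onto a corresponding DFG node.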
    case op_get_enumerable_length: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength,
            get(VirtualRegister(currentInstruction[2].u.operand))));
        NEXT_OPCODE(op_get_enumerable_length);
    }

    case op_has_generic_property: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty,
            get(VirtualRegister(currentInstruction[2].u.operand)),
            get(VirtualRegister(currentInstruction[3].u.operand))));
        NEXT_OPCODE(op_has_generic_property);
    }

    case op_has_structure_property: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty,
            get(VirtualRegister(currentInstruction[2].u.operand)),
            get(VirtualRegister(currentInstruction[3].u.operand)),
            get(VirtualRegister(currentInstruction[4].u.operand))));
        NEXT_OPCODE(op_has_structure_property);
    }

    case op_has_indexed_property: {
        Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
        ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
        Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
        Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
        set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
        NEXT_OPCODE(op_has_indexed_property);
    }

    case op_get_direct_pname: {
        SpeculatedType prediction = getPredictionWithoutOSRExit();

        Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
        Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
        Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
        Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
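
        // GetDirectPname takes more children than fit in a node's fixed edges, so they are
        // passed through the var-arg mechanism (addVarArgChild() plus Node::VarArg).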
        addVarArgChild(base);
        addVarArgChild(property);
        addVarArgChild(index);
        addVarArgChild(enumerator);
        set(VirtualRegister(currentInstruction[1].u.operand),
            addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));

        NEXT_OPCODE(op_get_direct_pname);
    }

    case op_get_structure_property_enumerator: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetStructurePropertyEnumerator,
            get(VirtualRegister(currentInstruction[2].u.operand)),
            get(VirtualRegister(currentInstruction[3].u.operand))));
        NEXT_OPCODE(op_get_structure_property_enumerator);
    }

    case op_get_generic_property_enumerator: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetGenericPropertyEnumerator,
            get(VirtualRegister(currentInstruction[2].u.operand)),
            get(VirtualRegister(currentInstruction[3].u.operand)),
            get(VirtualRegister(currentInstruction[4].u.operand))));
        NEXT_OPCODE(op_get_generic_property_enumerator);
    }

    case op_next_enumerator_pname: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorPname,
            get(VirtualRegister(currentInstruction[2].u.operand)),
            get(VirtualRegister(currentInstruction[3].u.operand))));
        NEXT_OPCODE(op_next_enumerator_pname);
    }

    case op_to_index_string: {
        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString,
            get(VirtualRegister(currentInstruction[2].u.operand))));
        NEXT_OPCODE(op_to_index_string);
    }

    default:
        // Parse failed! This should not happen because the capabilities checker
        // should have caught it.
        RELEASE_ASSERT_NOT_REACHED();
    }
    }
}
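
// linkBlock() runs once parsing is done: terminal nodes were created with bytecode offsets as
// their jump targets, and here those offsets are resolved to the actual basic blocks chosen
// from the set of possible targets.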
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node* node = block->last();
    ASSERT(node->isTerminal());

    switch (node->op()) {
    case Jump:
        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
        break;

    case Branch: {
        BranchData* data = node->branchData();
        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
        break;
    }

    case Switch: {
        SwitchData* data = node->switchData();
        for (unsigned i = node->switchData()->cases.size(); i--;)
            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
        break;
    }

    default:
        break;
    }

    if (verbose)
        dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");