2 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGByteCodeParser.h"
31 #include "ArrayConstructor.h"
32 #include "BasicBlockLocation.h"
33 #include "CallLinkStatus.h"
34 #include "CodeBlock.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGAbstractHeap.h"
37 #include "DFGArrayMode.h"
38 #include "DFGCapabilities.h"
39 #include "DFGClobberize.h"
40 #include "DFGClobbersExitState.h"
42 #include "DFGJITCode.h"
43 #include "GetByIdStatus.h"
45 #include "JSLexicalEnvironment.h"
46 #include "JSCInlines.h"
47 #include "JSModuleEnvironment.h"
48 #include "ObjectConstructor.h"
49 #include "PreciseJumpTargets.h"
50 #include "PutByIdFlags.h"
51 #include "PutByIdStatus.h"
52 #include "StackAlignment.h"
53 #include "StringConstructor.h"
54 #include "StructureStubInfo.h"
56 #include <wtf/CommaPrinter.h>
57 #include <wtf/HashMap.h>
58 #include <wtf/MathExtras.h>
59 #include <wtf/StdLibExtras.h>
61 namespace JSC { namespace DFG {
63 static const bool verbose = false;
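// ConstantBufferKey pairs a CodeBlock with a constant-buffer index; it keys the
// HashMap m_constantBufferCache in ByteCodeParser below, which caches a remapped
// buffer index per (code block, buffer index) pair.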
65 class ConstantBufferKey {
73 ConstantBufferKey(WTF::HashTableDeletedValueType)
79 ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
80 : m_codeBlock(codeBlock)
85 bool operator==(const ConstantBufferKey& other) const
87 return m_codeBlock == other.m_codeBlock
88 && m_index == other.m_index;
93 return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
96 bool isHashTableDeletedValue() const
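// A deleted key is encoded as a null CodeBlock with a nonzero index (presumably what
// the HashTableDeletedValue constructor above sets), so it cannot collide with a real key.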
98 return !m_codeBlock && m_index;
101 CodeBlock* codeBlock() const { return m_codeBlock; }
102 unsigned index() const { return m_index; }
105 CodeBlock* m_codeBlock;
109 struct ConstantBufferKeyHash {
110 static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
111 static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
116 static const bool safeToCompareToEmptyOrDeleted = true;
119 } } // namespace JSC::DFG
123 template<typename T> struct DefaultHash;
124 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
125 typedef JSC::DFG::ConstantBufferKeyHash Hash;
128 template<typename T> struct HashTraits;
129 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
133 namespace JSC { namespace DFG {
135 // === ByteCodeParser ===
137 // This class is used to compile the dataflow graph from a CodeBlock.
138 class ByteCodeParser {
140 ByteCodeParser(Graph& graph)
142 , m_codeBlock(graph.m_codeBlock)
143 , m_profiledBlock(graph.m_profiledBlock)
147 , m_constantUndefined(graph.freeze(jsUndefined()))
148 , m_constantNull(graph.freeze(jsNull()))
149 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
150 , m_constantOne(graph.freeze(jsNumber(1)))
151 , m_numArguments(m_codeBlock->numParameters())
152 , m_numLocals(m_codeBlock->m_numCalleeLocals)
153 , m_parameterSlots(0)
154 , m_numPassedVarArgs(0)
155 , m_inlineStackTop(0)
156 , m_currentInstruction(0)
157 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
159 ASSERT(m_profiledBlock);
162 // Parse a full CodeBlock of bytecode.
166 struct InlineStackEntry;
168 // Just parse from m_currentIndex to the end of the current CodeBlock.
169 void parseCodeBlock();
171 void ensureLocals(unsigned newNumLocals)
173 if (newNumLocals <= m_numLocals)
175 m_numLocals = newNumLocals;
176 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
177 m_graph.block(i)->ensureLocals(newNumLocals);
180 // Helper for min and max.
181 template<typename ChecksFunctor>
182 bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
184 void refineStatically(CallLinkStatus&, Node* callTarget);
185 // Handle calls. This resolves issues surrounding inlining and intrinsics.
186 enum Terminality { Terminal, NonTerminal };
187 Terminality handleCall(
188 int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
189 Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
190 SpeculatedType prediction);
191 Terminality handleCall(
192 int result, NodeType op, CallMode, unsigned instructionSize,
193 Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
194 Terminality handleCall(int result, NodeType op, CallMode, unsigned instructionSize, int callee, int argCount, int registerOffset);
195 Terminality handleCall(Instruction* pc, NodeType op, CallMode);
196 Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode);
197 void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
198 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
199 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CallMode); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
200 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
201 bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
202 enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
203 template<typename ChecksFunctor>
204 bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
205 template<typename ChecksFunctor>
206 void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
207 void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
208 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
209 template<typename ChecksFunctor>
210 bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
211 template<typename ChecksFunctor>
212 bool handleIntrinsicGetter(int resultOperand, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
213 template<typename ChecksFunctor>
214 bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
215 template<typename ChecksFunctor>
216 bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
217 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value);
218 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset);
220 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
221 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
222 ObjectPropertyCondition presenceLike(
223 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
225 // Attempt to watch the presence of a property. It will watch that the property is present in the same
226 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
227 // Returns true if this all works out.
228 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
229 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
231 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
232 template<typename VariantType>
233 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
235 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
237 void handleTryGetById(int destinationOperand, Node* base, unsigned identifierNumber, const GetByIdStatus&);
239 int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType);
241 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
243 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
246 // Either register a watchpoint or emit a check for this condition. Returns false if the
247 // condition no longer holds, and therefore no reasonable check can be emitted.
248 bool check(const ObjectPropertyCondition&);
250 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
252 // Either register a watchpoint or emit a check for this condition. It must be a Presence
253 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
254 // Emits code for the loaded value that the condition guards, and returns a node containing
255 // the loaded value. Returns null if the condition no longer holds.
256 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
257 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
258 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
260 // Calls check() for each condition in the set: that is, it either emits checks or registers
261 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
262 // conditions are no longer checkable, returns false.
263 bool check(const ObjectPropertyConditionSet&);
265 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
266 // base. Does a combination of watchpoint registration and check emission to guard the
267 // conditions, and emits code to load the value from the slot base. Returns a node containing
268 // the loaded value. Returns null if any of the conditions were no longer checkable.
269 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
270 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
272 void prepareToParseBlock();
275 // Parse a single basic block of bytecode instructions.
276 bool parseBlock(unsigned limit);
277 // Link block successors.
278 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
279 void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
281 VariableAccessData* newVariableAccessData(VirtualRegister operand)
283 ASSERT(!operand.isConstant());
285 m_graph.m_variableAccessData.append(VariableAccessData(operand));
286 return &m_graph.m_variableAccessData.last();
289 // Get/Set the operands/result of a bytecode instruction.
290 Node* getDirect(VirtualRegister operand)
292 ASSERT(!operand.isConstant());
294 // Is this an argument?
295 if (operand.isArgument())
296 return getArgument(operand);
299 return getLocal(operand);
302 Node* get(VirtualRegister operand)
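// Three cases follow: constants are lazily materialized as DoubleConstant/JSConstant
// nodes and cached in m_constants; reads of the Callee register are constant-folded
// when the callee is known (or watchable via the executable's singleton function);
// any other operand is remapped into the current inline frame and read via getDirect().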
304 if (operand.isConstant()) {
305 unsigned constantIndex = operand.toConstantIndex();
306 unsigned oldSize = m_constants.size();
307 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
308 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
309 JSValue value = codeBlock.getConstant(operand.offset());
310 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
311 if (constantIndex >= oldSize) {
312 m_constants.grow(constantIndex + 1);
313 for (unsigned i = oldSize; i < m_constants.size(); ++i)
314 m_constants[i] = nullptr;
317 Node* constantNode = nullptr;
318 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
319 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
321 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
322 m_constants[constantIndex] = constantNode;
324 ASSERT(m_constants[constantIndex]);
325 return m_constants[constantIndex];
328 if (inlineCallFrame()) {
329 if (!inlineCallFrame()->isClosureCall) {
330 JSFunction* callee = inlineCallFrame()->calleeConstant();
331 if (operand.offset() == JSStack::Callee)
332 return weakJSConstant(callee);
334 } else if (operand.offset() == JSStack::Callee) {
335 // We have to do some constant-folding here because this enables CreateThis folding. Note
336 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
337 // case if the function is a singleton then we already know it.
338 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
339 InferredValue* singleton = executable->singletonFunction();
340 if (JSValue value = singleton->inferredValue()) {
341 m_graph.watchpoints().addLazily(singleton);
342 JSFunction* function = jsCast<JSFunction*>(value);
343 return weakJSConstant(function);
346 return addToGraph(GetCallee);
349 return getDirect(m_inlineStackTop->remapOperand(operand));
353 // A normal set which follows a two-phase commit that spans code origins. During
354 // the current code origin it issues a MovHint, and at the start of the next
355 // code origin there will be a SetLocal. If the local needs flushing, the second
356 // SetLocal will be preceded with a Flush.
359 // A set where the SetLocal happens immediately and there is still a Flush. This
360 // is relevant when assigning to a local in tricky situations for the delayed
361 // SetLocal logic but where we know that we have not performed any side effects
362 // within this code origin. This is a safe replacement for NormalSet anytime we
363 // know that we have not yet performed side effects in this code origin.
364 ImmediateSetWithFlush,
366 // A set where the SetLocal happens immediately and we do not Flush it even if
367 // this is a local that is marked as needing it. This is relevant when
368 // initializing locals at the top of a function.
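// In practice (see setDirect() below), NormalSet issues the MovHint and merely queues a
// DelayedSetLocal, while the two Immediate modes perform the SetLocal right away.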
371 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
373 addToGraph(MovHint, OpInfo(operand.offset()), value);
375 // We can't exit anymore because our OSR exit state has changed.
378 DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
380 if (setMode == NormalSet) {
381 m_setLocalQueue.append(delayed);
385 return delayed.execute(this, setMode);
388 void processSetLocalQueue()
390 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
391 m_setLocalQueue[i].execute(this);
392 m_setLocalQueue.resize(0);
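// Draining the queue completes the two-phase commit described for NormalSet above: the
// MovHint was issued in the previous code origin, and the SetLocal (preceded by a Flush
// if the local needs one) is emitted here, at the start of the next code origin.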
395 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
397 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
400 Node* injectLazyOperandSpeculation(Node* node)
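// Pull the baseline block's lazy operand value profile for this (bytecode index, operand)
// pair, under its lock, and fold the prediction into the GetLocal's VariableAccessData.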
402 ASSERT(node->op() == GetLocal);
403 ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
404 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
405 LazyOperandValueProfileKey key(m_currentIndex, node->local());
406 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
407 node->variableAccessData()->predict(prediction);
411 // Used in implementing get/set, above, where the operand is a local variable.
412 Node* getLocal(VirtualRegister operand)
414 unsigned local = operand.toLocal();
416 Node* node = m_currentBlock->variablesAtTail.local(local);
418 // This has two goals: 1) link together variable access datas, and 2)
419 // try to avoid creating redundant GetLocals. (1) is required for
420 // correctness - no other phase will ensure that block-local variable
421 // access data unification is done correctly. (2) is purely opportunistic
422 // and is meant as a compile-time optimization only.
424 VariableAccessData* variable;
427 variable = node->variableAccessData();
429 switch (node->op()) {
433 return node->child1().node();
438 variable = newVariableAccessData(operand);
440 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
441 m_currentBlock->variablesAtTail.local(local) = node;
444 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
446 CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
447 m_currentSemanticOrigin = semanticOrigin;
449 unsigned local = operand.toLocal();
451 if (setMode != ImmediateNakedSet) {
452 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
453 if (argumentPosition)
454 flushDirect(operand, argumentPosition);
455 else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
459 VariableAccessData* variableAccessData = newVariableAccessData(operand);
460 variableAccessData->mergeStructureCheckHoistingFailed(
461 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
462 variableAccessData->mergeCheckArrayHoistingFailed(
463 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
464 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
465 m_currentBlock->variablesAtTail.local(local) = node;
467 m_currentSemanticOrigin = oldSemanticOrigin;
471 // Used in implementing get/set, above, where the operand is an argument.
472 Node* getArgument(VirtualRegister operand)
474 unsigned argument = operand.toArgument();
475 ASSERT(argument < m_numArguments);
477 Node* node = m_currentBlock->variablesAtTail.argument(argument);
479 VariableAccessData* variable;
482 variable = node->variableAccessData();
484 switch (node->op()) {
488 return node->child1().node();
493 variable = newVariableAccessData(operand);
495 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
496 m_currentBlock->variablesAtTail.argument(argument) = node;
499 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
501 CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
502 m_currentSemanticOrigin = semanticOrigin;
504 unsigned argument = operand.toArgument();
505 ASSERT(argument < m_numArguments);
507 VariableAccessData* variableAccessData = newVariableAccessData(operand);
509 // Always flush arguments, except for 'this'. If 'this' is created by us,
510 // then make sure that it's never unboxed.
512 if (setMode != ImmediateNakedSet)
513 flushDirect(operand);
514 } else if (m_codeBlock->specializationKind() == CodeForConstruct)
515 variableAccessData->mergeShouldNeverUnbox(true);
517 variableAccessData->mergeStructureCheckHoistingFailed(
518 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
519 variableAccessData->mergeCheckArrayHoistingFailed(
520 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
521 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
522 m_currentBlock->variablesAtTail.argument(argument) = node;
524 m_currentSemanticOrigin = oldSemanticOrigin;
528 ArgumentPosition* findArgumentPositionForArgument(int argument)
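// Walk up to the root (non-inlined) stack entry: arguments addressed this way belong to
// the machine frame, so we use that entry's argument position trackers.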
530 InlineStackEntry* stack = m_inlineStackTop;
531 while (stack->m_inlineCallFrame)
532 stack = stack->m_caller;
533 return stack->m_argumentPositions[argument];
536 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
538 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
539 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
540 if (!inlineCallFrame)
542 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
544 if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
546 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
548 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
549 return stack->m_argumentPositions[argument];
554 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
556 if (operand.isArgument())
557 return findArgumentPositionForArgument(operand.toArgument());
558 return findArgumentPositionForLocal(operand);
561 void flush(VirtualRegister operand)
563 flushDirect(m_inlineStackTop->remapOperand(operand));
566 void flushDirect(VirtualRegister operand)
568 flushDirect(operand, findArgumentPosition(operand));
571 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
573 ASSERT(!operand.isConstant());
575 Node* node = m_currentBlock->variablesAtTail.operand(operand);
577 VariableAccessData* variable;
580 variable = node->variableAccessData();
582 variable = newVariableAccessData(operand);
584 node = addToGraph(Flush, OpInfo(variable));
585 m_currentBlock->variablesAtTail.operand(operand) = node;
586 if (argumentPosition)
587 argumentPosition->addVariable(variable);
590 void flush(InlineStackEntry* inlineStackEntry)
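// Flush everything this frame exposes: the callee and argument count for closure/varargs
// inline frames, every argument except 'this', and the scope register when the debugger
// is enabled, presumably so that OSR exit and frame reification can still observe them.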
593 if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
594 ASSERT(!m_hasDebuggerEnabled);
595 numArguments = inlineCallFrame->arguments.size();
596 if (inlineCallFrame->isClosureCall)
597 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
598 if (inlineCallFrame->isVarargs())
599 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
601 numArguments = inlineStackEntry->m_codeBlock->numParameters();
602 for (unsigned argument = numArguments; argument-- > 1;)
603 flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
604 if (m_hasDebuggerEnabled)
605 flush(m_codeBlock->scopeRegister());
608 void flushForTerminal()
610 for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
611 flush(inlineStackEntry);
614 void flushForReturn()
616 flush(m_inlineStackTop);
619 void flushIfTerminal(SwitchData& data)
621 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
624 for (unsigned i = data.cases.size(); i--;) {
625 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
632 // Assumes that the constant should be strongly marked.
633 Node* jsConstant(JSValue constantValue)
635 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
638 Node* weakJSConstant(JSValue constantValue)
640 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
643 // Helper functions to get/set the this value.
646 return get(m_inlineStackTop->m_codeBlock->thisRegister());
649 void setThis(Node* value)
651 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
654 InlineCallFrame* inlineCallFrame()
656 return m_inlineStackTop->m_inlineCallFrame;
659 bool allInlineFramesAreTailCalls()
661 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
664 CodeOrigin currentCodeOrigin()
666 return CodeOrigin(m_currentIndex, inlineCallFrame());
669 NodeOrigin currentNodeOrigin()
674 if (m_currentSemanticOrigin.isSet())
675 semantic = m_currentSemanticOrigin;
677 semantic = currentCodeOrigin();
679 forExit = currentCodeOrigin();
681 return NodeOrigin(semantic, forExit, m_exitOK);
684 BranchData* branchData(unsigned taken, unsigned notTaken)
686 // We assume that branches originating from bytecode always have a fall-through. We
687 // use this assumption to avoid checking for the creation of terminal blocks.
688 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
689 BranchData* data = m_graph.m_branchData.add();
690 *data = BranchData::withBytecodeIndices(taken, notTaken);
694 Node* addToGraph(Node* node)
696 if (Options::verboseDFGByteCodeParsing())
697 dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n");
698 m_currentBlock->append(node);
699 if (clobbersExitState(m_graph, node))
704 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
706 Node* result = m_graph.addNode(
707 SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
709 return addToGraph(result);
711 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
713 Node* result = m_graph.addNode(
714 SpecNone, op, currentNodeOrigin(), child1, child2, child3);
715 return addToGraph(result);
717 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
719 Node* result = m_graph.addNode(
720 SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
722 return addToGraph(result);
724 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
726 Node* result = m_graph.addNode(
727 SpecNone, op, currentNodeOrigin(), info1, info2,
728 Edge(child1), Edge(child2), Edge(child3));
729 return addToGraph(result);
732 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
734 Node* result = m_graph.addNode(
735 SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
736 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
739 m_numPassedVarArgs = 0;
744 void addVarArgChild(Node* child)
746 m_graph.m_varArgChildren.append(Edge(child));
747 m_numPassedVarArgs++;
750 Node* addCallWithoutSettingResult(
751 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
754 addVarArgChild(callee);
755 size_t frameSize = JSStack::CallFrameHeaderSize + argCount;
756 size_t alignedFrameSize = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), frameSize);
757 size_t parameterSlots = alignedFrameSize - JSStack::CallerFrameAndPCSize;
759 if (parameterSlots > m_parameterSlots)
760 m_parameterSlots = parameterSlots;
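// Track the high-water mark of outgoing-call argument area size; see the comment on
// m_parameterSlots below for how this feeds stack preallocation.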
762 for (int i = 0; i < argCount; ++i)
763 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
765 return addToGraph(Node::VarArg, op, opInfo, prediction);
769 int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
770 SpeculatedType prediction)
772 if (op == TailCall) {
773 if (allInlineFramesAreTailCalls())
774 return addCallWithoutSettingResult(op, OpInfo(), callee, argCount, registerOffset, OpInfo());
775 op = TailCallInlinedCaller;
779 Node* call = addCallWithoutSettingResult(
780 op, opInfo, callee, argCount, registerOffset, OpInfo(prediction));
781 VirtualRegister resultReg(result);
782 if (resultReg.isValid())
783 set(resultReg, call);
787 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
789 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
790 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
791 // object's structure as soon as we make it a weakJSConstant.
792 Node* objectNode = weakJSConstant(object);
793 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
797 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
799 SpeculatedType prediction;
800 CodeBlock* profiledBlock = nullptr;
803 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
804 prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
806 if (prediction == SpecNone) {
807 // If we have no information about the values this
808 // node generates, we check if by any chance it is
809 // a tail call opcode. In that case, we walk up the
810 // inline frames to find a call higher in the call
811 // chain and use its prediction. If we only have
812 // inlined tail call frames, we use SpecFullTop
813 // to avoid a spurious OSR exit.
814 Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex;
815 OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode);
819 case op_tail_call_varargs: {
820 if (!inlineCallFrame()) {
821 prediction = SpecFullTop;
824 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
826 prediction = SpecFullTop;
829 InlineStackEntry* stack = m_inlineStackTop;
830 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
831 stack = stack->m_caller;
832 bytecodeIndex = codeOrigin->bytecodeIndex;
833 profiledBlock = stack->m_profiledBlock;
844 ConcurrentJITLocker locker(profiledBlock->m_lock);
845 prediction = profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
851 SpeculatedType getPrediction(unsigned bytecodeIndex)
853 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
855 if (prediction == SpecNone) {
856 // We have no information about what values this node generates. Give up
857 // on executing this code, since we're likely to do more damage than good.
858 addToGraph(ForceOSRExit);
864 SpeculatedType getPredictionWithoutOSRExit()
866 return getPredictionWithoutOSRExit(m_currentIndex);
869 SpeculatedType getPrediction()
871 return getPrediction(m_currentIndex);
874 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
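// Recompute the array profile's prediction under the baseline block's lock and build an
// ArrayMode from it, marking the mode as needing out-of-bounds handling if the profile
// has observed an out-of-bounds access.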
876 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
877 profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
878 bool makeSafe = profile->outOfBounds(locker);
879 return ArrayMode::fromObserved(locker, profile, action, makeSafe);
882 ArrayMode getArrayMode(ArrayProfile* profile)
884 return getArrayMode(profile, Array::Read);
887 Node* makeSafe(Node* node)
889 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
890 node->mergeFlags(NodeMayOverflowInt32InDFG);
891 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
892 node->mergeFlags(NodeMayNegZeroInDFG);
894 if (!isX86() && node->op() == ArithMod)
897 if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
900 switch (node->op()) {
905 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw a double.
906 node->mergeFlags(NodeMayOverflowInt32InBaseline);
910 // Currently we can't tell the difference between a negation overflowing
911 // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the slow
912 // path then we assume that it did both of those things.
913 node->mergeFlags(NodeMayOverflowInt32InBaseline);
914 node->mergeFlags(NodeMayNegZeroInBaseline);
918 ResultProfile& resultProfile = *m_inlineStackTop->m_profiledBlock->resultProfileForBytecodeOffset(m_currentIndex);
919 if (resultProfile.didObserveInt52Overflow())
920 node->mergeFlags(NodeMayOverflowInt52);
921 if (resultProfile.didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
922 node->mergeFlags(NodeMayOverflowInt32InBaseline);
923 if (resultProfile.didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
924 node->mergeFlags(NodeMayNegZeroInBaseline);
925 if (resultProfile.didObserveNonInt32())
926 node->mergeFlags(NodeMayHaveNonIntResult);
931 RELEASE_ASSERT_NOT_REACHED();
938 Node* makeDivSafe(Node* node)
940 ASSERT(node->op() == ArithDiv);
942 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
943 node->mergeFlags(NodeMayOverflowInt32InDFG);
944 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
945 node->mergeFlags(NodeMayNegZeroInDFG);
947 // The main slow case counter for op_div in the old JIT counts only when
948 // the operands are not numbers. We don't care about that since we already
949 // have speculations in place that take care of that separately. We only
950 // care about when the outcome of the division is not an integer, which
951 // is what the special fast case counter tells us.
953 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
956 // FIXME: It might be possible to make this more granular.
957 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
962 void noticeArgumentsUse()
964 // All of the arguments in this function need to be formatted as JSValues because we will
965 // load from them in a random-access fashion and we don't want to have to switch on their format.
968 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
969 argument->mergeShouldNeverUnbox(true);
972 bool needsDynamicLookup(ResolveType, OpcodeID);
975 CodeBlock* m_codeBlock;
976 CodeBlock* m_profiledBlock;
979 // The current block being generated.
980 BasicBlock* m_currentBlock;
981 // The bytecode index of the current instruction being generated.
982 unsigned m_currentIndex;
983 // The semantic origin of the current node if different from the current Index.
984 CodeOrigin m_currentSemanticOrigin;
985 // True if it's OK to OSR exit right now.
986 bool m_exitOK { false };
988 FrozenValue* m_constantUndefined;
989 FrozenValue* m_constantNull;
990 FrozenValue* m_constantNaN;
991 FrozenValue* m_constantOne;
992 Vector<Node*, 16> m_constants;
994 // The number of arguments passed to the function.
995 unsigned m_numArguments;
996 // The number of locals (vars + temporaries) used in the function.
997 unsigned m_numLocals;
998 // The number of slots (in units of sizeof(Register)) that we need to
999 // preallocate for arguments to outgoing calls from this frame. This
1000 // number includes the CallFrame slots that we initialize for the callee
1001 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1002 // This number is 0 if and only if this function is a leaf.
1003 unsigned m_parameterSlots;
1004 // The number of var args passed to the next var arg node.
1005 unsigned m_numPassedVarArgs;
1007 HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
1009 struct InlineStackEntry {
1010 ByteCodeParser* m_byteCodeParser;
1012 CodeBlock* m_codeBlock;
1013 CodeBlock* m_profiledBlock;
1014 InlineCallFrame* m_inlineCallFrame;
1016 ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
1018 QueryableExitProfile m_exitProfile;
1020 // Remapping of identifier and constant numbers from the code block being
1021 // inlined (inline callee) to the code block that we're inlining into
1022 // (the machine code block, which is the transitive, though not necessarily direct, caller).
1024 Vector<unsigned> m_identifierRemap;
1025 Vector<unsigned> m_constantBufferRemap;
1026 Vector<unsigned> m_switchRemap;
1028 // Blocks introduced by this code block, which need successor linking.
1029 // May include up to one basic block that includes the continuation after
1030 // the callsite in the caller. These must be appended in the order that they
1031 // are created, but their bytecodeBegin values need not be in order.
1033 Vector<UnlinkedBlock> m_unlinkedBlocks;
1035 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1036 // cannot have two blocks that have the same bytecodeBegin.
1037 Vector<BasicBlock*> m_blockLinkingTargets;
1039 // If the callsite's basic block was split into two, then this will be
1040 // the head of the callsite block. It needs its successors linked to the
1041 // m_unlinkedBlocks, but not the other way around: there's no way for
1042 // any blocks in m_unlinkedBlocks to jump back into this block.
1043 BasicBlock* m_callsiteBlockHead;
1045 // Does the callsite block head need linking? This is typically true
1046 // but will be false for the machine code block's inline stack entry
1047 // (since that one is not inlined) and for cases where an inline callee
1048 // did the linking for us.
1049 bool m_callsiteBlockHeadNeedsLinking;
1051 VirtualRegister m_returnValue;
1053 // Speculations about variable types collected from the profiled code block,
1054 // which are based on OSR exit profiles that past DFG compilations of this
1055 // code block had gathered.
1056 LazyOperandValueProfileParser m_lazyOperands;
1058 CallLinkInfoMap m_callLinkInfos;
1059 StubInfoMap m_stubInfos;
1060 ByValInfoMap m_byValInfos;
1062 // Did we see any returns? We need to handle the (uncommon but necessary)
1063 // case where a procedure that does not return was inlined.
1066 // Did we have any early returns?
1067 bool m_didEarlyReturn;
1069 // Pointers to the argument position trackers for this slice of code.
1070 Vector<ArgumentPosition*> m_argumentPositions;
1072 InlineStackEntry* m_caller;
1077 CodeBlock* profiledBlock,
1078 BasicBlock* callsiteBlockHead,
1079 JSFunction* callee, // Null if this is a closure call.
1080 VirtualRegister returnValueVR,
1081 VirtualRegister inlineCallFrameStart,
1082 int argumentCountIncludingThis,
1083 InlineCallFrame::Kind);
1087 m_byteCodeParser->m_inlineStackTop = m_caller;
1090 VirtualRegister remapOperand(VirtualRegister operand) const
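// Operands of inlined code are addressed relative to their inline call frame, so translate
// by the frame's stack offset; the machine code block's entry has no inline call frame and
// presumably returns the operand unchanged.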
1092 if (!m_inlineCallFrame)
1095 ASSERT(!operand.isConstant());
1097 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1101 InlineStackEntry* m_inlineStackTop;
1103 struct DelayedSetLocal {
1104 CodeOrigin m_origin;
1105 VirtualRegister m_operand;
1108 DelayedSetLocal() { }
1109 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
1111 , m_operand(operand)
1116 Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
1118 if (m_operand.isArgument())
1119 return parser->setArgument(m_origin, m_operand, m_value, setMode);
1120 return parser->setLocal(m_origin, m_operand, m_value, setMode);
1124 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1126 CodeBlock* m_dfgCodeBlock;
1127 CallLinkStatus::ContextMap m_callContextMap;
1128 StubInfoMap m_dfgStubInfos;
1130 Instruction* m_currentInstruction;
1131 bool m_hasDebuggerEnabled;
1134 #define NEXT_OPCODE(name) \
1135 m_currentIndex += OPCODE_LENGTH(name); \
1138 #define LAST_OPCODE(name) \
1139 m_currentIndex += OPCODE_LENGTH(name); \
1141 return shouldContinueParsing
1143 ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode)
1145 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1146 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call));
1148 pc[1].u.operand, op, callMode, OPCODE_LENGTH(op_call),
1149 pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
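// The op_call family lays out its operands as pc[1] = result, pc[2] = callee, pc[3] =
// argument count including 'this', and pc[4] = register offset, negated here to match
// handleCall's convention that registerOffset <= 0.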
1152 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1153 int result, NodeType op, CallMode callMode, unsigned instructionSize,
1154 int callee, int argumentCountIncludingThis, int registerOffset)
1156 Node* callTarget = get(VirtualRegister(callee));
1158 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1159 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1160 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1163 result, op, callMode, instructionSize, callTarget,
1164 argumentCountIncludingThis, registerOffset, callLinkStatus);
1167 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1168 int result, NodeType op, CallMode callMode, unsigned instructionSize,
1169 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1170 CallLinkStatus callLinkStatus)
1173 result, op, InlineCallFrame::kindFor(callMode), instructionSize, callTarget, argumentCountIncludingThis,
1174 registerOffset, callLinkStatus, getPrediction());
1177 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1179 if (callTarget->isCellConstant()) {
1180 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1185 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1186 int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1187 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1188 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1190 ASSERT(registerOffset <= 0);
1192 refineStatically(callLinkStatus, callTarget);
1194 if (Options::verboseDFGByteCodeParsing())
1195 dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1197 if (!callLinkStatus.canOptimize()) {
1198 // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1199 // that we cannot optimize them.
1201 Node* callNode = addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1202 if (callNode->op() == TailCall)
1204 ASSERT(callNode->op() != TailCallVarargs);
1208 unsigned nextOffset = m_currentIndex + instructionSize;
1212 if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1213 if (m_graph.compilation())
1214 m_graph.compilation()->noticeInlinedCall();
1218 Node* callNode = addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1219 if (callNode->op() == TailCall)
1221 ASSERT(callNode->op() != TailCallVarargs);
1225 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode)
1227 ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
1228 ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs));
1230 int result = pc[1].u.operand;
1231 int callee = pc[2].u.operand;
1232 int thisReg = pc[3].u.operand;
1233 int arguments = pc[4].u.operand;
1234 int firstFreeReg = pc[5].u.operand;
1235 int firstVarArgOffset = pc[6].u.operand;
1237 SpeculatedType prediction = getPrediction();
1239 Node* callTarget = get(VirtualRegister(callee));
1241 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1242 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1243 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1244 refineStatically(callLinkStatus, callTarget);
1246 if (Options::verboseDFGByteCodeParsing())
1247 dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1249 if (callLinkStatus.canOptimize()
1250 && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(callMode), prediction)) {
1251 if (m_graph.compilation())
1252 m_graph.compilation()->noticeInlinedCall();
1256 CallVarargsData* data = m_graph.m_callVarargsData.add();
1257 data->firstVarArgOffset = firstVarArgOffset;
1259 Node* thisChild = get(VirtualRegister(thisReg));
1261 if (op == TailCallVarargs) {
1262 if (allInlineFramesAreTailCalls()) {
1263 addToGraph(op, OpInfo(data), OpInfo(), callTarget, get(VirtualRegister(arguments)), thisChild);
1266 op = TailCallVarargsInlinedCaller;
1269 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
1270 VirtualRegister resultReg(result);
1271 if (resultReg.isValid())
1272 set(resultReg, call);
1276 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1279 if (thisArgumentReg.isValid())
1280 thisArgument = get(thisArgumentReg);
1285 Node* callTargetForCheck;
1286 if (callee.isClosureCall()) {
1287 calleeCell = callee.executable();
1288 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1290 calleeCell = callee.nonExecutableCallee();
1291 callTargetForCheck = callTarget;
1295 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1298 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1300 for (int i = 0; i < argumentCountIncludingThis; ++i)
1301 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
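// The Phantoms keep the argument values alive even though no Call node will consume them,
// e.g. when the call is replaced by an inlined intrinsic or a constant internal function
// (see attemptToInlineCall below).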
1304 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CallMode callMode)
1306 CodeSpecializationKind kind = specializationKindFor(callMode);
1308 dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1310 if (m_hasDebuggerEnabled) {
1312 dataLog(" Failing because the debugger is in use.\n");
1316 FunctionExecutable* executable = callee.functionExecutable();
1319 dataLog(" Failing because there is no function executable.\n");
1323 // Does the number of arguments we're passing match the arity of the target? We currently
1324 // inline only if the number of arguments passed is greater than or equal to the number
1325 // of arguments expected.
1326 if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1328 dataLog(" Failing because of arity mismatch.\n");
1332 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1333 // being an inline candidate? We might not have a code block (1) if code was thrown away,
1334 // (2) if we simply hadn't actually made this call yet, or (3) the code is a builtin function and the
1335 // specialization kind is construct. In the first two cases, we could still theoretically attempt
1336 // to inline it if we had a static proof of what was being called; this might happen for example
1337 // if you call a global function, where watchpointing gives us static information. Overall,
1338 // it's a rare case because we expect that any hot callees would have already been compiled.
1339 CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1342 dataLog(" Failing because no code block available.\n");
1345 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1346 codeBlock, kind, callee.isClosureCall());
1348 dataLog(" Call mode: ", callMode, "\n");
1349 dataLog(" Is closure call: ", callee.isClosureCall(), "\n");
1350 dataLog(" Capability level: ", capabilityLevel, "\n");
1351 dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
1352 dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
1353 dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1354 dataLog(" Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
1356 if (!canInline(capabilityLevel)) {
1358 dataLog(" Failing because the function is not inlineable.\n");
1362 // Check if the caller is already too large. We do this check here because that's just
1363 // where we happen to also have the callee's code block, and we want that for the
1364 // purpose of unsetting SABI.
1365 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1366 codeBlock->m_shouldAlwaysBeInlined = false;
1368 dataLog(" Failing because the caller is too large.\n");
1372 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1374 // https://bugs.webkit.org/show_bug.cgi?id=127627
1376 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1377 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1378 // haven't gotten to Baseline yet. Consider not inlining these functions.
1379 // https://bugs.webkit.org/show_bug.cgi?id=145503
1381 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1382 // too many levels? If either of these are detected, then don't inline. We adjust our
1383 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1386 unsigned recursion = 0;
1388 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1390 if (depth >= Options::maximumInliningDepth()) {
1392 dataLog(" Failing because depth exceeded.\n");
1396 if (entry->executable() == executable) {
1398 if (recursion >= Options::maximumInliningRecursion()) {
1400 dataLog(" Failing because recursion detected.\n");
1407 dataLog(" Inlining should be possible.\n");
1409 // It might be possible to inline.
1410 return codeBlock->instructionCount();
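// This instruction count is what attemptToInlineCall() charges against its inliningBalance;
// UINT_MAX, returned on the failure paths above, means "not an inlining candidate".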
1413 template<typename ChecksFunctor>
1414 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
1416 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1418 ASSERT(inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind)) != UINT_MAX);
1420 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1421 insertChecks(codeBlock);
1423 // FIXME: Don't flush constants!
1425 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1428 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1429 JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeLocals);
1431 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1433 VirtualRegister resultReg(resultOperand);
1434 if (resultReg.isValid())
1435 resultReg = m_inlineStackTop->remapOperand(resultReg);
1437 VariableAccessData* calleeVariable = nullptr;
1438 if (callee.isClosureCall()) {
1439 Node* calleeSet = set(
1440 VirtualRegister(registerOffset + JSStack::Callee), callTargetNode, ImmediateNakedSet);
1442 calleeVariable = calleeSet->variableAccessData();
1443 calleeVariable->mergeShouldNeverUnbox(true);
1446 InlineStackEntry inlineStackEntry(
1447 this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1448 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1450 // This is where the actual inlining really happens.
1451 unsigned oldIndex = m_currentIndex;
1454 // At this point, it's again OK to OSR exit.
1457 InlineVariableData inlineVariableData;
1458 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1459 inlineVariableData.argumentPositionStart = argumentPositionStart;
1460 inlineVariableData.calleeVariable = 0;
1463 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1464 == callee.isClosureCall());
1465 if (callee.isClosureCall()) {
1466 RELEASE_ASSERT(calleeVariable);
1467 inlineVariableData.calleeVariable = calleeVariable;
1470 m_graph.m_inlineVariableData.append(inlineVariableData);
1473 clearCaches(); // Reset our state now that we're back to the outer code.
1475 m_currentIndex = oldIndex;
1478 // If the inlined code created some new basic blocks, then we have linking to do.
1479 if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1481 ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1482 if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1483 linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1485 ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1487 if (callerLinkability == CallerDoesNormalLinking)
1488 cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1490 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1492 ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1494 BasicBlock* lastBlock = m_graph.lastBlock();
1495 // If there was a return, but no early returns, then we're done. We allow parsing of
1496 // the caller to continue in whatever basic block we're in right now.
1497 if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1498 if (Options::verboseDFGByteCodeParsing())
1499 dataLog(" Allowing parsing to continue in last inlined block.\n");
1501 ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
1503 // If we created new blocks then the last block needs linking, but in the
1504 // caller. It doesn't need to be linked to, but it needs outgoing links.
1505 if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1506 // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1507 // for release builds because this block will never serve as a potential target
1508 // in the linker's binary search.
1509 if (Options::verboseDFGByteCodeParsing())
1510 dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
1511 lastBlock->bytecodeBegin = m_currentIndex;
1512 if (callerLinkability == CallerDoesNormalLinking) {
1514 dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1515 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1519 m_currentBlock = m_graph.lastBlock();
1523 if (Options::verboseDFGByteCodeParsing())
1524 dataLog(" Creating new block after inlining.\n");
1526 // If we get to this point then all blocks must end in some sort of terminal.
1527 ASSERT(lastBlock->terminal());
1529 // Need to create a new basic block for the continuation at the caller.
1530 RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, 1));
1532 // Link the early returns to the basic block we're about to create.
1533 for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1534 if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1536 BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1537 ASSERT(!blockToLink->isLinked);
1538 Node* node = blockToLink->terminal();
1539 ASSERT(node->op() == Jump);
1540 ASSERT(!node->targetBlock());
1541 node->targetBlock() = block.get();
1542 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1544 dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1545 blockToLink->didLink();
1548 m_currentBlock = block.get();
1549 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1551 dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1552 if (callerLinkability == CallerDoesNormalLinking) {
1553 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1554 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1556 m_graph.appendBlock(block);
1557 prepareToParseBlock();
1560 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1562 // It's possible that the callsite block head is not owned by the caller.
1563 if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1564 // It's definitely owned by the caller, because the caller created new blocks.
1565 // Assert that this all adds up.
1566 ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1567 ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1568 inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1570 // It's definitely not owned by the caller. Tell the caller that he does not
1571 // need to link his callsite block head, because we did it for him.
1572 ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1573 ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1574 inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1578 template<typename ChecksFunctor>
1579 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
1581 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1583 if (!inliningBalance)
1587 dataLog(" Considering callee ", callee, "\n");
1589 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1590 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1591 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1592 // and there are no callsite value profiles and native functions won't have callee value profiles for
1593 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1594 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1595 // calling LoadVarargs twice.
1596 if (!InlineCallFrame::isVarargs(kind)) {
1598 bool didInsertChecks = false;
1599 auto insertChecksWithAccounting = [&] () {
1600 insertChecks(nullptr);
1601 didInsertChecks = true;
    };
1604 if (InternalFunction* function = callee.internalFunction()) {
1605 if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
1606 RELEASE_ASSERT(didInsertChecks);
1607 addToGraph(Phantom, callTargetNode);
1608 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
    inliningBalance--;
    return true;
    }
1612 RELEASE_ASSERT(!didInsertChecks);
    return false;
    }
1616 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1617 if (intrinsic != NoIntrinsic) {
1618 if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1619 RELEASE_ASSERT(didInsertChecks);
1620 addToGraph(Phantom, callTargetNode);
1621 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
    inliningBalance--;
    return true;
    }
1626 RELEASE_ASSERT(!didInsertChecks);
1627 // We might still try to inline the Intrinsic because it might be a builtin JS function.
    }
    }
1631 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind));
1632 if (myInliningCost > inliningBalance)
    return false;
1635 Instruction* savedCurrentInstruction = m_currentInstruction;
1636 inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
1637 inliningBalance -= myInliningCost;
1638 m_currentInstruction = savedCurrentInstruction;
    return true;
    }
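// handleInlining() drives call inlining. It first tries the "simple" case of a single known callee
// inlined without new control flow. Otherwise, when compiling for the FTL with polymorphic call
// inlining enabled and the status is based on a stub, it emits a Switch over the callee (or its
// executable, for closure calls), one inlined block per variant, a slow-path block, and a shared
// continuation block.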
1642 bool ByteCodeParser::handleInlining(
1643 Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
1644 int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
1645 VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
1646 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1649 dataLog("Handling inlining...\n");
1650 dataLog("Stack: ", currentCodeOrigin(), "\n");
1652 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1654 if (!callLinkStatus.size()) {
1656 dataLog("Bailing inlining.\n");
1660 if (InlineCallFrame::isVarargs(kind)
1661 && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1663 dataLog("Bailing inlining because of varargs.\n");
1667 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1668 if (specializationKind == CodeForConstruct)
1669 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1670 if (callLinkStatus.isClosureCall())
1671 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1673 // First check if we can avoid creating control flow. Our inliner does some CFG
1674 // simplification on the fly and this helps reduce compile times, but we can only leverage
1675 // this in cases where we don't need control flow diamonds to check the callee.
1676 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1679 // Only used for varargs calls.
1680 unsigned mandatoryMinimum = 0;
1681 unsigned maxNumArguments = 0;
1683 if (InlineCallFrame::isVarargs(kind)) {
1684 if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
1685 mandatoryMinimum = functionExecutable->parameterCount();
1687 mandatoryMinimum = 0;
1690 maxNumArguments = std::max(
1691 callLinkStatus.maxNumArguments(),
1692 mandatoryMinimum + 1);
1694 // We sort of pretend that this *is* the number of arguments that were passed.
1695 argumentCountIncludingThis = maxNumArguments;
1697 registerOffset = registerOffsetOrFirstFreeReg + 1;
1698 registerOffset -= maxNumArguments; // includes "this"
1699 registerOffset -= JSStack::CallFrameHeaderSize;
1700 registerOffset = -WTF::roundUpToMultipleOf(
1701 stackAlignmentRegisters(),
1704 registerOffset = registerOffsetOrFirstFreeReg;
1706 bool result = attemptToInlineCall(
1707 callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1708 argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1709 inliningBalance, [&] (CodeBlock* codeBlock) {
1710 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1712 // If we have a varargs call, we want to extract the arguments right now.
1713 if (InlineCallFrame::isVarargs(kind)) {
1714 int remappedRegisterOffset =
1715 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1717 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1719 int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
1720 int remappedArgumentStart =
1721 m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1723 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1724 data->start = VirtualRegister(remappedArgumentStart + 1);
1725 data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
1726 data->offset = argumentsOffset;
1727 data->limit = maxNumArguments;
1728 data->mandatoryMinimum = mandatoryMinimum;
1730 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1732 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1733 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1734 // callTargetNode because the other 2 are still in use and alive at this point.
1735 addToGraph(Phantom, callTargetNode);
1737 // In DFG IR before SSA, we cannot insert control flow between the LoadVarargs and the last
1738 // SetArgument. This isn't a problem once we get to DFG SSA. Fortunately, we also have other
1739 // reasons for not inserting control flow before SSA.
1742 VariableAccessData* countVariable = newVariableAccessData(
1743 VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
1744 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1745 // matter very much, since our use of a SetArgument and Flushes for this local slot is
1746 // mostly just a formality.
1747 countVariable->predict(SpecInt32);
1748 countVariable->mergeIsProfitableToUnbox(true);
1749 Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1750 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1752 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1753 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1754 VariableAccessData* variable = newVariableAccessData(
1755 VirtualRegister(remappedArgumentStart + argument));
1756 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1758 // For a while it had been my intention to do things like this inside the
1759 // prediction injection phase. But in this case it's really best to do it here,
1760 // because it's here that we have access to the variable access datas for the
1761 // inlining we're about to do.
1763 // Something else that's interesting here is that we'd really love to get
1764 // predictions from the arguments loaded at the callsite, rather than the
1765 // arguments received inside the callee. But that probably won't matter for most calls.
1767 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1768 ConcurrentJITLocker locker(codeBlock->m_lock);
1769 if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
1770 variable->predict(profile->computeUpdatedPrediction(locker));
1773 Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1774 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1779 dataLog("Done inlining (simple).\n");
1780 dataLog("Stack: ", currentCodeOrigin(), "\n");
1781 dataLog("Result: ", result, "\n");
1786 // We need to create some kind of switch over callee. For now we only do this if we believe that
1787 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1788 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1789 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1790 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1792 if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining()
1793 || InlineCallFrame::isVarargs(kind)) {
1795 dataLog("Bailing inlining (hard).\n");
1796 dataLog("Stack: ", currentCodeOrigin(), "\n");
1801 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1802 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1804 if (!Options::usePolymorphicCallInliningForNonStubStatus()
1805 && !callLinkStatus.isBasedOnStub()) {
1807 dataLog("Bailing inlining (non-stub polymorphism).\n");
1808 dataLog("Stack: ", currentCodeOrigin(), "\n");
1813 unsigned oldOffset = m_currentIndex;
1815 bool allAreClosureCalls = true;
1816 bool allAreDirectCalls = true;
1817 for (unsigned i = callLinkStatus.size(); i--;) {
1818 if (callLinkStatus[i].isClosureCall())
1819 allAreDirectCalls = false;
1821 allAreClosureCalls = false;
1824 Node* thingToSwitchOn;
1825 if (allAreDirectCalls)
1826 thingToSwitchOn = callTargetNode;
1827 else if (allAreClosureCalls)
1828 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1830 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1831 // where it would be beneficial. It might be best to handle these cases as if all calls were
    // closure calls.
1833 // https://bugs.webkit.org/show_bug.cgi?id=136020
1835 dataLog("Bailing inlining (mix).\n");
1836 dataLog("Stack: ", currentCodeOrigin(), "\n");
1842 dataLog("Doing hard inlining...\n");
1843 dataLog("Stack: ", currentCodeOrigin(), "\n");
1846 int registerOffset = registerOffsetOrFirstFreeReg;
1848 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1849 // store the callee so that it will be accessible to all of the blocks we're about to create. We
1850 // get away with doing an immediate-set here because we wouldn't have performed any side effects
    // yet.
1853 dataLog("Register offset: ", registerOffset);
1854 VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1855 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1857 dataLog("Callee is going to be ", calleeReg, "\n");
1858 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1860 // It's OK to exit right now, even though we set some locals. That's because those locals are not
    // user-visible.
1865 SwitchData& data = *m_graph.m_switchData.add();
1866 data.kind = SwitchCell;
1867 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1869 BasicBlock* originBlock = m_currentBlock;
1871 dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1872 originBlock->didLink();
1873 cancelLinkingForBlock(m_inlineStackTop, originBlock);
1875 // Each inlined callee will have a landing block that it returns at. They should all have jumps
1876 // to the continuation block, which we create last.
1877 Vector<BasicBlock*> landingBlocks;
1879 // We may force this true if we give up on inlining any of the edges.
1880 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1883 dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1885 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1886 m_currentIndex = oldOffset;
1887 RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1888 m_currentBlock = block.get();
1889 m_graph.appendBlock(block);
1890 prepareToParseBlock();
1892 Node* myCallTargetNode = getDirect(calleeReg);
1894 bool inliningResult = attemptToInlineCall(
1895 myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
1896 argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1897 inliningBalance, [&] (CodeBlock*) { });
1899 if (!inliningResult) {
1900 // That failed so we let the block die. Nothing interesting should have been added to
1901 // the block. We also give up on inlining any of the (less frequent) callees.
1902 ASSERT(m_currentBlock == block.get());
1903 ASSERT(m_graph.m_blocks.last() == block);
1904 m_graph.killBlockAndItsContents(block.get());
1905 m_graph.m_blocks.removeLast();
1907 // The fact that inlining failed means we need a slow path.
1908 couldTakeSlowPath = true;
1912 JSCell* thingToCaseOn;
1913 if (allAreDirectCalls)
1914 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
1916 ASSERT(allAreClosureCalls);
1917 thingToCaseOn = callLinkStatus[i].executable();
1919 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1920 m_currentIndex = nextOffset;
1922 processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1923 if (Node* terminal = m_currentBlock->terminal())
1924 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs);
1927 landingBlocks.append(m_currentBlock);
1930 dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1931 m_currentBlock->didLink();
1934 dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
1937 RefPtr<BasicBlock> slowPathBlock = adoptRef(
1938 new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1939 m_currentIndex = oldOffset;
1941 data.fallThrough = BranchTarget(slowPathBlock.get());
1942 m_graph.appendBlock(slowPathBlock);
1944 dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1945 slowPathBlock->didLink();
1946 prepareToParseBlock();
1947 m_currentBlock = slowPathBlock.get();
1948 Node* myCallTargetNode = getDirect(calleeReg);
1949 if (couldTakeSlowPath) {
1951 resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1952 registerOffset, prediction);
1954 addToGraph(CheckBadCell);
1955 addToGraph(Phantom, myCallTargetNode);
1956 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1958 set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1961 m_currentIndex = nextOffset;
1962 m_exitOK = true; // Origin changed, so it's fine to exit again.
1963 processSetLocalQueue();
1964 if (Node* terminal = m_currentBlock->terminal())
1965 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs);
1968 landingBlocks.append(m_currentBlock);
1971 RefPtr<BasicBlock> continuationBlock = adoptRef(
1972 new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1973 m_graph.appendBlock(continuationBlock);
1975 dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1976 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1977 prepareToParseBlock();
1978 m_currentBlock = continuationBlock.get();
1980 for (unsigned i = landingBlocks.size(); i--;)
1981 landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();
1983 m_currentIndex = oldOffset;
1987 dataLog("Done inlining (hard).\n");
1988 dataLog("Stack: ", currentCodeOrigin(), "\n");
    return true;
    }
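// handleMinMax() inlines Math.min/Math.max: no arguments yields NaN, a single argument is passed
// through with a number check, and two arguments become an ArithMin/ArithMax node; larger arities
// fall back to the generic call path.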
1993 template<typename ChecksFunctor>
1994 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
1996 if (argumentCountIncludingThis == 1) { // Math.min()
1998 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2002 if (argumentCountIncludingThis == 2) { // Math.min(x)
2004 Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2005 addToGraph(Phantom, Edge(result, NumberUse));
2006 set(VirtualRegister(resultOperand), result);
2010 if (argumentCountIncludingThis == 3) { // Math.min(x, y)
2012 set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2016 // Don't handle >=3 arguments for now.
    return false;
    }
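// handleIntrinsicCall() lowers known intrinsics to dedicated DFG nodes when the argument count and
// array/profile information allow it; any case that cannot be handled returns false so the generic
// call path is used instead.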
2020 template<typename ChecksFunctor>
2021 bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2023 switch (intrinsic) {
2025 // Intrinsic Functions:
2027 case AbsIntrinsic: {
2028 if (argumentCountIncludingThis == 1) { // Math.abs()
2030 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2034 if (!MacroAssembler::supportsFloatingPointAbs())
2038 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2039 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2040 node->mergeFlags(NodeMayOverflowInt32InDFG);
2041 set(VirtualRegister(resultOperand), node);
2046 return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
2049 return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
2054 case LogIntrinsic: {
2055 if (argumentCountIncludingThis == 1) {
2057 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2061 switch (intrinsic) {
2064 set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
2069 set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
2074 set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
2079 set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
2083 RELEASE_ASSERT_NOT_REACHED();
2088 case PowIntrinsic: {
2089 if (argumentCountIncludingThis < 3) {
2090 // Math.pow() and Math.pow(x) return NaN.
2092 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2096 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2097 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2098 set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
2102 case ArrayPushIntrinsic: {
2103 if (argumentCountIncludingThis != 2)
2106 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2107 if (!arrayMode.isJSArray())
2109 switch (arrayMode.type()) {
2112 case Array::Contiguous:
2113 case Array::ArrayStorage: {
2115 Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2116 set(VirtualRegister(resultOperand), arrayPush);
2126 case ArrayPopIntrinsic: {
2127 if (argumentCountIncludingThis != 1)
2130 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2131 if (!arrayMode.isJSArray())
2133 switch (arrayMode.type()) {
2136 case Array::Contiguous:
2137 case Array::ArrayStorage: {
2139 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2140 set(VirtualRegister(resultOperand), arrayPop);
2149 case IsArrayIntrinsic: {
2150 if (argumentCountIncludingThis != 2)
2154 Node* isArray = addToGraph(IsArrayObject, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
2155 set(VirtualRegister(resultOperand), isArray);
2159 case IsJSArrayIntrinsic: {
2160 ASSERT(argumentCountIncludingThis == 2);
2163 Node* isArray = addToGraph(IsJSArray, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
2164 set(VirtualRegister(resultOperand), isArray);
2168 case IsArrayConstructorIntrinsic: {
2169 ASSERT(argumentCountIncludingThis == 2);
2172 Node* isArray = addToGraph(IsArrayConstructor, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
2173 set(VirtualRegister(resultOperand), isArray);
2177 case CharCodeAtIntrinsic: {
2178 if (argumentCountIncludingThis != 2)
2182 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2183 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2184 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2186 set(VirtualRegister(resultOperand), charCode);
2190 case CharAtIntrinsic: {
2191 if (argumentCountIncludingThis != 2)
2195 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2196 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2197 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2199 set(VirtualRegister(resultOperand), charCode);
2202 case Clz32Intrinsic: {
2204 if (argumentCountIncludingThis == 1)
2205 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2207 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2208 set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
2212 case FromCharCodeIntrinsic: {
2213 if (argumentCountIncludingThis != 2)
2217 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2218 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2220 set(VirtualRegister(resultOperand), charCode);
2225 case RegExpExecIntrinsic: {
2226 if (argumentCountIncludingThis != 2)
2230 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2231 set(VirtualRegister(resultOperand), regExpExec);
2236 case RegExpTestIntrinsic: {
2237 if (argumentCountIncludingThis != 2)
2241 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2242 set(VirtualRegister(resultOperand), regExpExec);
2247 case IsRegExpObjectIntrinsic: {
2248 ASSERT(argumentCountIncludingThis == 2);
2251 Node* isRegExpObject = addToGraph(IsRegExpObject, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));
2252 set(VirtualRegister(resultOperand), isRegExpObject);
2256 case StringPrototypeReplaceIntrinsic: {
2257 if (argumentCountIncludingThis != 3)
2261 Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2262 set(VirtualRegister(resultOperand), result);
2266 case RoundIntrinsic:
2267 case FloorIntrinsic:
2269 case TruncIntrinsic: {
2270 if (argumentCountIncludingThis == 1) {
2272 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2275 if (argumentCountIncludingThis == 2) {
2277 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2279 if (intrinsic == RoundIntrinsic)
2281 else if (intrinsic == FloorIntrinsic)
2283 else if (intrinsic == CeilIntrinsic)
2286 ASSERT(intrinsic == TruncIntrinsic);
2289 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2290 set(VirtualRegister(resultOperand), roundNode);
2295 case IMulIntrinsic: {
2296 if (argumentCountIncludingThis != 3)
2299 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2300 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2301 Node* left = get(leftOperand);
2302 Node* right = get(rightOperand);
2303 set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
2307 case RandomIntrinsic: {
2308 if (argumentCountIncludingThis != 1)
2311 set(VirtualRegister(resultOperand), addToGraph(ArithRandom));
2315 case FRoundIntrinsic: {
2316 if (argumentCountIncludingThis != 2)
2319 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2320 set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
2324 case DFGTrueIntrinsic: {
2326 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2330 case OSRExitIntrinsic: {
2332 addToGraph(ForceOSRExit);
2333 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2337 case IsFinalTierIntrinsic: {
2339 set(VirtualRegister(resultOperand),
2340 jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
2344 case SetInt32HeapPredictionIntrinsic: {
2346 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2347 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2348 if (node->hasHeapPrediction())
2349 node->setHeapPrediction(SpecInt32);
2351 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2355 case CheckInt32Intrinsic: {
2357 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2358 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2359 addToGraph(Phantom, Edge(node, Int32Use));
2361 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2365 case FiatInt52Intrinsic: {
2366 if (argumentCountIncludingThis != 2)
2369 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
    if (enableInt52())
2371 set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
    else
2373 set(VirtualRegister(resultOperand), get(operand));
    return true;
    }
    default:
    return false;
    }
    }
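// handleIntrinsicGetter() inlines intrinsic getters found by GetByIdStatus: the typed array
// byteLength, length, and byteOffset accessors. It refines the Array::Type across the structure set
// and emits GetArrayLength / GetTypedArrayByteOffset nodes instead of calling the getter.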
2382 template<typename ChecksFunctor>
2383 bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
2385 switch (variant.intrinsic()) {
2386 case TypedArrayByteLengthIntrinsic: {
2389 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
2390 Array::Type arrayType = toArrayType(type);
2391 size_t logSize = logElementSize(type);
2393 variant.structureSet().forEach([&] (Structure* structure) {
2394 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
2395 ASSERT(logSize == logElementSize(curType));
2396 arrayType = refineTypedArrayType(arrayType, curType);
2397 ASSERT(arrayType != Array::Generic);
2400 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode);
2403 set(VirtualRegister(resultOperand), lengthNode);
2407 // We can use a BitLShift here because typed arrays will never have a byteLength
2408 // that overflows int32.
2409 Node* shiftNode = jsConstant(jsNumber(logSize));
2410 set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode));
2415 case TypedArrayLengthIntrinsic: {
2418 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
2419 Array::Type arrayType = toArrayType(type);
2421 variant.structureSet().forEach([&] (Structure* structure) {
2422 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
2423 arrayType = refineTypedArrayType(arrayType, curType);
2424 ASSERT(arrayType != Array::Generic);
2427 set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode));
2433 case TypedArrayByteOffsetIntrinsic: {
2436 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
2437 Array::Type arrayType = toArrayType(type);
2439 variant.structureSet().forEach([&] (Structure* structure) {
2440 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
2441 arrayType = refineTypedArrayType(arrayType, curType);
2442 ASSERT(arrayType != Array::Generic);
2445 set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType).asWord()), thisNode));
2453 RELEASE_ASSERT_NOT_REACHED();
    }
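// handleTypedArrayConstructor() recognizes single-argument `new FooArray(blah)` calls to the native
// typed array constructors of the current global object and lowers them to a NewTypedArray node;
// anything else is left to the generic construct path.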
2456 template<typename ChecksFunctor>
2457 bool ByteCodeParser::handleTypedArrayConstructor(
2458 int resultOperand, InternalFunction* function, int registerOffset,
2459 int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
2461 if (!isTypedView(type))
2464 if (function->classInfo() != constructorClassInfoForType(type))
2467 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2470 // We only have an intrinsic for the case where you say:
2472 // new FooArray(blah);
2474 // Of course, 'blah' could be any of the following:
2476 // - Integer, indicating that you want to allocate an array of that length.
2477 // This is the thing we're hoping for, and what we can actually do meaningful
2478 // optimizations for.
2480 // - Array buffer, indicating that you want to create a view onto that _entire_
    // buffer.
2483 // - Non-buffer object, indicating that you want to create a copy of that
2484 // object by pretending that it quacks like an array.
2486 // - Anything else, indicating that you want to have an exception thrown at
    // you.
2489 // The intrinsic, NewTypedArray, will behave as if it could do any of these
2490 // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
2491 // predicted Int32, then we lock it in as a normal typed array allocation.
2492 // Otherwise, NewTypedArray turns into a totally opaque function call that
2493 // may clobber the world - by virtue of it accessing properties on what could
    // be any object.
2496 // Note that although the generic form of NewTypedArray sounds sort of awful,
2497 // it is actually quite likely to be more efficient than a fully generic
2498 // Construct. So, we might want to think about making NewTypedArray variadic,
2499 // or else making Construct not super slow.
2501 if (argumentCountIncludingThis != 2)
2505 set(VirtualRegister(resultOperand),
2506 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
    return true;
    }
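// handleConstantInternalFunction() inlines calls to known internal functions: the Array and String
// constructors, the Object constructor (for calls only), and the typed array constructors. For
// construct calls it bails out when new.target differs from the callee, since the structure of the
// result would be unknown.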
2510 template<typename ChecksFunctor>
2511 bool ByteCodeParser::handleConstantInternalFunction(
2512 Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset,
2513 int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
2516 dataLog(" Handling constant internal function ", JSValue(function), "\n");
2518 if (kind == CodeForConstruct) {
2519 Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset));
2520 // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we
2521 // don't know what the prototype of the constructed object will be.
2522 // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700
2523 if (newTargetNode != callTargetNode)
2527 if (function->classInfo() == ArrayConstructor::info()) {
2528 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2532 if (argumentCountIncludingThis == 2) {
2533 set(VirtualRegister(resultOperand),
2534 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
    return true;
    }
2538 for (int i = 1; i < argumentCountIncludingThis; ++i)
2539 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2540 set(VirtualRegister(resultOperand),
2541 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
    return true;
    }
2545 if (function->classInfo() == StringConstructor::info()) {
    insertChecks();
    Node* result;
2550 if (argumentCountIncludingThis <= 1)
2551 result = jsConstant(m_vm->smallStrings.emptyString());
    else
2553 result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
2555 if (kind == CodeForConstruct)
2556 result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
2558 set(VirtualRegister(resultOperand), result);
    return true;
    }
2562 // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
2563 if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
2566 Node* result = addToGraph(CallObjectConstructor, get(virtualRegisterForArgument(1, registerOffset)));
2567 set(VirtualRegister(resultOperand), result);
    return true;
    }
2571 for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
2572 bool result = handleTypedArrayConstructor(
2573 resultOperand, function, registerOffset, argumentCountIncludingThis,
2574 indexToTypedArrayType(typeIndex), insertChecks);
    if (result)
        return true;
    }
    return false;
    }
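// handleGetByOffset() and handlePutByOffset() emit the raw property access. The property storage is
// the base itself for inline offsets and a GetButterfly load otherwise; offset, identifier, and
// inferred type are recorded in StorageAccessData for the resulting GetByOffset/PutByOffset node.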
2582 Node* ByteCodeParser::handleGetByOffset(
2583 SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset,
2584 const InferredType::Descriptor& inferredType, NodeType op)
2586 Node* propertyStorage;
2587 if (isInlineOffset(offset))
2588 propertyStorage = base;
2590 propertyStorage = addToGraph(GetButterfly, base);
2592 StorageAccessData* data = m_graph.m_storageAccessData.add();
2593 data->offset = offset;
2594 data->identifierNumber = identifierNumber;
2595 data->inferredType = inferredType;
2596 m_graph.registerInferredType(inferredType);
2598 Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
    return getByOffset;
    }
2603 Node* ByteCodeParser::handlePutByOffset(
2604 Node* base, unsigned identifier, PropertyOffset offset, const InferredType::Descriptor& inferredType,
2607 Node* propertyStorage;
2608 if (isInlineOffset(offset))
2609 propertyStorage = base;
2611 propertyStorage = addToGraph(GetButterfly, base);
2613 StorageAccessData* data = m_graph.m_storageAccessData.add();
2614 data->offset = offset;
2615 data->identifierNumber = identifier;
2616 data->inferredType = inferredType;
2617 m_graph.registerInferredType(inferredType);
2619 Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
    return result;
    }
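// check() tries to prove an ObjectPropertyCondition without emitting code by installing a
// watchpoint. If that fails but the object's current structure still ensures validity, it emits a
// CheckStructure on a weak constant of the object; otherwise it gives up.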
2624 bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
2629 if (m_graph.watchCondition(condition))
    return true;
2632 Structure* structure = condition.object()->structure();
2633 if (!condition.structureEnsuresValidity(structure))
    return false;
    addToGraph(
        CheckStructure,
2638 OpInfo(m_graph.addStructureSet(structure)),
2639 weakJSConstant(condition.object()));
    return true;
    }
2643 GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
2645 if (method.kind() == GetByOffsetMethod::LoadFromPrototype
2646 && method.prototype()->structure()->dfgShouldWatch()) {
2647 if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
2648 return GetByOffsetMethod::constant(m_graph.freeze(constant));
    }
    return method;
    }
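// needsDynamicLookup() decides whether a scope access must be compiled as a fully dynamic lookup,
// either because var injection has been observed or because an UnresolvedProperty access has
// already OSR-exited here and the baseline JIT is unlikely to produce a better ResolveType.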
2654 bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode)
2656 ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope);
2658 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2659 if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated())
2663 case GlobalProperty:
2665 case GlobalLexicalVar:
2667 case LocalClosureVar:
2671 case UnresolvedProperty:
2672 case UnresolvedPropertyWithVarInjectionChecks: {
2673 // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
2674 // haven't exited from this access before, to let the baseline JIT try to better
2675 // cache the access. If we've already exited from this operation, it's unlikely that
2676 // the baseline will come up with a better ResolveType and instead we will compile
2677 // this as a dynamic scope access.
2679 // We only track our heuristic through resolve_scope since resolve_scope will
2680 // dominate unresolved gets/puts on that scope.
2681 if (opcode != op_resolve_scope)
2684 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, InadequateCoverage)) {
2685 // We've already exited so give up on getting better ResolveType information.
2689 // We have not exited yet, so let's have the baseline get better ResolveType information for us.
2690 // This type of code is often seen when we tier up in a loop but haven't executed the part
2691 // of a function that comes after the loop.
2698 case GlobalPropertyWithVarInjectionChecks:
2699 case GlobalVarWithVarInjectionChecks:
2700 case GlobalLexicalVarWithVarInjectionChecks:
2701 case ClosureVarWithVarInjectionChecks:
2705 ASSERT_NOT_REACHED();
2709 GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyCondition& condition)
2712 dataLog("Planning a load: ", condition, "\n");
2714 // We might promote this to Equivalence, and a later DFG pass might also do such promotion
2715 // even if we fail, but for simplicity this cannot be asked to load an equivalence condition.
2716 // None of the clients of this method will request a load of an Equivalence condition anyway,
2717 // and supporting it would complicate the heuristics below.
2718 RELEASE_ASSERT(condition.kind() == PropertyCondition::Presence);
2720 // Here's the ranking of how to handle this, from most preferred to least preferred:
2722 // 1) Watchpoint on an equivalence condition and return a constant node for the loaded value.
2723 // No other code is emitted, and the structure of the base object is never registered.
2724 // Hence this results in zero code and we won't jettison this compilation if the object
2725 // transitions, even if the structure is watchable right now.
2727 // 2) Need to emit a load, and the current structure of the base is going to be watched by the
2728 // DFG anyway (i.e. dfgShouldWatch). Watch the structure and emit the load. Don't watch the
2729 // condition, since the act of turning the base into a constant in IR will cause the DFG to
2730 // watch the structure anyway and doing so would subsume watching the condition.
2732 // 3) Need to emit a load, and the current structure of the base is watchable but not by the
2733 // DFG (i.e. transitionWatchpointSetIsStillValid() and !dfgShouldWatchIfPossible()). Watch
2734 // the condition, and emit a load.
2736 // 4) Need to emit a load, and the current structure of the base is not watchable. Emit a
2737 // structure check, and emit a load.
2739 // 5) The condition does not hold. Give up and return null.
2741 // First, try to promote Presence to Equivalence. We do this before doing anything else
2742 // because it's the most profitable. Also, there are cases where the presence is watchable but
2743 // we don't want to watch it unless it became an equivalence (see the relationship between
2744 // (1), (2), and (3) above).
2745 ObjectPropertyCondition equivalenceCondition = condition.attemptToMakeEquivalenceWithoutBarrier();
2746 if (m_graph.watchCondition(equivalenceCondition))
2747 return GetByOffsetMethod::constant(m_graph.freeze(equivalenceCondition.requiredValue()));
2749 // At this point, we'll have to materialize the condition's base as a constant in DFG IR. Once
2750 // we do this, the frozen value will have its own idea of what the structure is. Use that from
2751 // now on just because it's less confusing.
2752 FrozenValue* base = m_graph.freeze(condition.object());
2753 Structure* structure = base->structure();
2755 // Check if the structure that we've registered makes the condition hold. If not, just give
2756 // up. This is case (5) above.
2757 if (!condition.structureEnsuresValidity(structure))
2758 return GetByOffsetMethod();
2760 // If the structure is watched by the DFG already, then just use this fact to emit the load.
2761 // This is case (2) above.
2762 if (structure->dfgShouldWatch())
2763 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
2765 // If we can watch the condition right now, then we can emit the load after watching it. This
2766 // is case (3) above.
2767 if (m_graph.watchCondition(condition))
2768 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
2770 // We can't watch anything but we know that the current structure satisfies the condition. So,
2771 // check for that structure and then emit the load.
2774 OpInfo(m_graph.addStructureSet(structure)),
2775 addToGraph(JSConstant, OpInfo(base)));
2776 return promoteToConstant(GetByOffsetMethod::loadFromPrototype(base, condition.offset()));
    }
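// load() turns a GetByOffsetMethod produced by planLoad() into IR: a JSConstant for an
// already-proven value, or a GetByOffset rooted at the frozen prototype.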
2779 Node* ByteCodeParser::load(
2780 SpeculatedType prediction, unsigned identifierNumber, const GetByOffsetMethod& method,
2783 switch (method.kind()) {
2784 case GetByOffsetMethod::Invalid:
    return nullptr;
2786 case GetByOffsetMethod::Constant:
2787 return addToGraph(JSConstant, OpInfo(method.constant()));
2788 case GetByOffsetMethod::LoadFromPrototype: {
2789 Node* baseNode = addToGraph(JSConstant, OpInfo(method.prototype()));
2790 return handleGetByOffset(
2791 prediction, baseNode, identifierNumber, method.offset(), InferredType::Top, op);
2793 case GetByOffsetMethod::Load:
2794 // Will never see this from planLoad().
2795 RELEASE_ASSERT_NOT_REACHED();
2799 RELEASE_ASSERT_NOT_REACHED();
2803 Node* ByteCodeParser::load(
2804 SpeculatedType prediction, const ObjectPropertyCondition& condition, NodeType op)
2806 GetByOffsetMethod method = planLoad(condition);
2807 return load(prediction, m_graph.identifiers().ensure(condition.uid()), method, op);
2810 bool ByteCodeParser::check(const ObjectPropertyConditionSet& conditionSet)
2812 for (const ObjectPropertyCondition condition : conditionSet) {
2813 if (!check(condition))
    return false;
    }
    return true;
    }
2819 GetByOffsetMethod ByteCodeParser::planLoad(const ObjectPropertyConditionSet& conditionSet)
2822 dataLog("conditionSet = ", conditionSet, "\n");
2824 GetByOffsetMethod result;
2825 for (const ObjectPropertyCondition condition : conditionSet) {
2826 switch (condition.kind()) {
2827 case PropertyCondition::Presence:
2828 RELEASE_ASSERT(!result); // Should only see exactly one of these.
2829 result = planLoad(condition);
2831 return GetByOffsetMethod();
2834 if (!check(condition))
2835 return GetByOffsetMethod();
2839 RELEASE_ASSERT(!!result);
2843 Node* ByteCodeParser::load(
2844 SpeculatedType prediction, const ObjectPropertyConditionSet& conditionSet, NodeType op)
2846 GetByOffsetMethod method = planLoad(conditionSet);
2849 m_graph.identifiers().ensure(conditionSet.slotBaseCondition().uid()),
2853 ObjectPropertyCondition ByteCodeParser::presenceLike(
2854 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
2857 return ObjectPropertyCondition();
2858 unsigned attributes;
2859 PropertyOffset firstOffset = set[0]->getConcurrently(uid, attributes);
2860 if (firstOffset != offset)
2861 return ObjectPropertyCondition();
2862 for (unsigned i = 1; i < set.size(); ++i) {
2863 unsigned otherAttributes;
2864 PropertyOffset otherOffset = set[i]->getConcurrently(uid, otherAttributes);
2865 if (otherOffset != offset || otherAttributes != attributes)
2866 return ObjectPropertyCondition();
2868 return ObjectPropertyCondition::presenceWithoutBarrier(knownBase, uid, offset, attributes);
2871 bool ByteCodeParser::checkPresenceLike(
2872 JSObject* knownBase, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
2874 return check(presenceLike(knownBase, uid, offset, set));
2877 void ByteCodeParser::checkPresenceLike(
2878 Node* base, UniquedStringImpl* uid, PropertyOffset offset, const StructureSet& set)
2880 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>()) {
2881 if (checkPresenceLike(knownBase, uid, offset, set))
2885 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(set)), base);
2888 template<typename VariantType>
2889 Node* ByteCodeParser::load(
2890 SpeculatedType prediction, Node* base, unsigned identifierNumber, const VariantType& variant)
2892 // Make sure backwards propagation knows that we've used base.
2893 addToGraph(Phantom, base);
2895 bool needStructureCheck = true;
2897 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
2899 if (JSObject* knownBase = base->dynamicCastConstant<JSObject*>()) {
2900 // Try to optimize away the structure check. Note that it's not worth doing anything about this
2901 // if the base's structure is watched.
2902 Structure* structure = base->constant()->structure();
2903 if (!structure->dfgShouldWatch()) {
2904 if (!variant.conditionSet().isEmpty()) {
2905 // This means that we're loading from a prototype. We expect the base not to have the
2906 // property. We can only use ObjectPropertyCondition if all of the structures in the
2907 // variant.structureSet() agree on the prototype (it would be hilariously rare if they
2908 // didn't). Note that we are relying on structureSet() having at least one element. That
2909 // will always be true here because of how GetByIdStatus/PutByIdStatus work.
2910 JSObject* prototype = variant.structureSet()[0]->storedPrototypeObject();
2911 bool allAgree = true;
2912 for (unsigned i = 1; i < variant.structureSet().size(); ++i) {
2913 if (variant.structureSet()[i]->storedPrototypeObject() != prototype) {
2919 ObjectPropertyCondition condition = ObjectPropertyCondition::absenceWithoutBarrier(
2920 knownBase, uid, prototype);
2921 if (check(condition))
2922 needStructureCheck = false;
2925 // This means we're loading directly from base. We can avoid all of the code that follows
2926 // if we can prove that the property is a constant. Otherwise, we try to prove that the
2927 // property is watchably present, in which case we get rid of the structure check.
2929 ObjectPropertyCondition presenceCondition =
2930 presenceLike(knownBase, uid, variant.offset(), variant.structureSet());
2931 if (presenceCondition) {
2932 ObjectPropertyCondition equivalenceCondition =
2933 presenceCondition.attemptToMakeEquivalenceWithoutBarrier();
2934 if (m_graph.watchCondition(equivalenceCondition))
2935 return weakJSConstant(equivalenceCondition.requiredValue());
2937 if (check(presenceCondition))
2938 needStructureCheck = false;
2944 if (needStructureCheck)
2945 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2947 SpeculatedType loadPrediction;
2949 if (variant.callLinkStatus() || variant.intrinsic() != NoIntrinsic) {
2950 loadPrediction = SpecCellOther;
2951 loadOp = GetGetterSetterByOffset;
2953 loadPrediction = prediction;
2954 loadOp = GetByOffset;
2958 if (!variant.conditionSet().isEmpty())
2959 loadedValue = load(loadPrediction, variant.conditionSet(), loadOp);
2961 if (needStructureCheck && base->hasConstant()) {
2962 // We did emit a structure check. That means that we have an opportunity to do constant folding
2963 // here, since we didn't do it above.
2964 JSValue constant = m_graph.tryGetConstantProperty(
2965 base->asJSValue(), variant.structureSet(), variant.offset());
2967 return weakJSConstant(constant);
2970 InferredType::Descriptor inferredType;
2971 if (needStructureCheck) {
2972 for (Structure* structure : variant.structureSet()) {
2973 InferredType::Descriptor thisType = m_graph.inferredTypeForProperty(structure, uid);
2974 inferredType.merge(thisType);
2977 inferredType = InferredType::Top;
2979 loadedValue = handleGetByOffset(
2980 loadPrediction, base, identifierNumber, variant.offset(), inferredType, loadOp);
    }
    return loadedValue;
    }
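// store() handles the Replace form of PutByIdVariant: prove (or check) that the property is present
// at the expected offset, then emit the PutByOffset.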
2986 Node* ByteCodeParser::store(Node* base, unsigned identifier, const PutByIdVariant& variant, Node* value)
2988 RELEASE_ASSERT(variant.kind() == PutByIdVariant::Replace);
2990 checkPresenceLike(base, m_graph.identifiers()[identifier], variant.offset(), variant.structure());
2991 return handlePutByOffset(base, identifier, variant.offset(), variant.requiredType(), value);
    }
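// handleGetById() picks a strategy for get_by_id: a plain GetById/GetByIdFlush/TryGetById node when
// the status is not simple, a MultiGetByOffset for polymorphic-but-simple accesses in the FTL, an
// inlined load for a single simple variant, or an inlined getter call via handleCall() when the
// variant carries a call link status.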
2994 void ByteCodeParser::handleGetById(
2995 int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2996 GetByIdStatus getByIdStatus, AccessType type)
2998 // Attempt to reduce the set of things in the GetByIdStatus.
2999 if (base->op() == NewObject) {
3001 for (unsigned i = m_currentBlock->size(); i--;) {
3002 Node* node = m_currentBlock->at(i);
3005 if (writesOverlap(m_graph, node, JSCell_structureID)) {
3011 getByIdStatus.filter(base->structure());
3015 if (type == AccessType::Get)
3016 getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
3018 getById = TryGetById;
3020 ASSERT(type == AccessType::Get || !getByIdStatus.makesCalls());
3021 if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) {
3022 set(VirtualRegister(destinationOperand),
3023 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
3027 if (getByIdStatus.numVariants() > 1) {
3028 if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
3029 || !Options::usePolymorphicAccessInlining()) {
3030 set(VirtualRegister(destinationOperand),
3031 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
3035 Vector<MultiGetByOffsetCase, 2> cases;
3037 // 1) Emit prototype structure checks for all chains. This may not be
3038 // optimal, if there is some rarely executed case in the chain that requires a lot
3039 // of checks and those checks are not watchpointable.
3040 for (const GetByIdVariant& variant : getByIdStatus.variants()) {
3041 if (variant.intrinsic() != NoIntrinsic) {
3042 set(VirtualRegister(destinationOperand),
3043 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
3047 if (variant.conditionSet().isEmpty()) {
3049 MultiGetByOffsetCase(
3050 variant.structureSet(),
3051 GetByOffsetMethod::load(variant.offset())));
3055 GetByOffsetMethod method = planLoad(variant.conditionSet());
3057 set(VirtualRegister(destinationOperand),
3058 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
3062 cases.append(MultiGetByOffsetCase(variant.structureSet(), method));
3065 if (m_graph.compilation())
3066 m_graph.compilation()->noticeInlinedGetById();
3068 // 2) Emit a MultiGetByOffset
3069 MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
3070 data->cases = cases;
3071 data->identifierNumber = identifierNumber;
3072 set(VirtualRegister(destinationOperand),
3073 addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
3077 ASSERT(getByIdStatus.numVariants() == 1);
3078 GetByIdVariant variant = getByIdStatus[0];
3080 Node* loadedValue = load(prediction, base, identifierNumber, variant);
3082 set(VirtualRegister(destinationOperand),
3083 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
3087 if (m_graph.compilation())
3088 m_graph.compilation()->noticeInlinedGetById();
3090 ASSERT(type == AccessType::Get || !variant.callLinkStatus());
3091 if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) {
3092 set(VirtualRegister(destinationOperand), loadedValue);
3096 Node* getter = addToGraph(GetGetter, loadedValue);
3098 if (handleIntrinsicGetter(destinationOperand, variant, base,
3100 addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter, base);
3102 addToGraph(Phantom, getter);
3106 ASSERT(variant.intrinsic() == NoIntrinsic);
3108 // Make a call. We don't try to get fancy with using the smallest operand number because
3109 // the stack layout phase should compress the stack anyway.
3111 unsigned numberOfParameters = 0;
3112 numberOfParameters++; // The 'this' argument.
3113 numberOfParameters++; // True return PC.
3115 // Start with a register offset that corresponds to the last in-use register.
3116 int registerOffset = virtualRegisterForLocal(
3117 m_inlineStackTop->m_profiledBlock->m_numCalleeLocals - 1).offset();
3118 registerOffset -= numberOfParameters;
3119 registerOffset -= JSStack::CallFrameHeaderSize;
3121 // Get the alignment right.
3122 registerOffset = -WTF::roundUpToMultipleOf(
3123 stackAlignmentRegisters(),
3127 m_inlineStackTop->remapOperand(
3128 VirtualRegister(registerOffset)).toLocal());
3130 // Issue SetLocals. This has two effects:
3131 // 1) That's how handleCall() sees the arguments.
3132 // 2) If we inline then this ensures that the arguments are flushed so that if you use
3133 // the dreaded arguments object on the getter, the right things happen. Well, sort of -
3134 // since we only really care about 'this' in this case. But we're not going to take that
    // shortcut.
3136 int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
3137 set(VirtualRegister(nextRegister++), base, ImmediateNakedSet);
3139 // We've set some locals, but they are not user-visible. It's still OK to exit from here.
3144 destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
3145 getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
    }
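// emitPutById() emits the un-inlined form of a put_by_id, using PutByIdDirect for direct puts and
// PutByIdFlush when the status indicates the access may make calls. handlePutById() below tries to
// inline simple puts instead.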
3148 void ByteCodeParser::emitPutById(
3149 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
3152 addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
3154 addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
3157 void ByteCodeParser::handlePutById(
3158 Node* base, unsigned identifierNumber, Node* value,
3159 const PutByIdStatus& putByIdStatus, bool isDirect)
3161 if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) {
3162 if (!putByIdStatus.isSet())
3163 addToGraph(ForceOSRExit);
3164 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
3168 if (putByIdStatus.numVariants() > 1) {
3169 if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
3170 || !Options::usePolymorphicAccessInlining()) {
3171 emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
3176 for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
3177 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)