2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGByteCodeParser.h"
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BuiltinNames.h"
35 #include "BytecodeStructs.h"
36 #include "CallLinkStatus.h"
37 #include "CodeBlock.h"
38 #include "CodeBlockWithJITType.h"
39 #include "CommonSlowPaths.h"
40 #include "DFGAbstractHeap.h"
41 #include "DFGArrayMode.h"
43 #include "DFGCapabilities.h"
44 #include "DFGClobberize.h"
45 #include "DFGClobbersExitState.h"
47 #include "DFGJITCode.h"
48 #include "FunctionCodeBlock.h"
49 #include "GetByIdStatus.h"
51 #include "InByIdStatus.h"
52 #include "InstanceOfStatus.h"
53 #include "JSCInlines.h"
54 #include "JSFixedArray.h"
55 #include "JSImmutableButterfly.h"
56 #include "JSModuleEnvironment.h"
57 #include "JSModuleNamespaceObject.h"
58 #include "NumberConstructor.h"
59 #include "ObjectConstructor.h"
60 #include "OpcodeInlines.h"
61 #include "PreciseJumpTargets.h"
62 #include "PutByIdFlags.h"
63 #include "PutByIdStatus.h"
64 #include "RegExpPrototype.h"
65 #include "StackAlignment.h"
66 #include "StringConstructor.h"
67 #include "StructureStubInfo.h"
68 #include "SymbolConstructor.h"
70 #include <wtf/CommaPrinter.h>
71 #include <wtf/HashMap.h>
72 #include <wtf/MathExtras.h>
73 #include <wtf/SetForScope.h>
74 #include <wtf/StdLibExtras.h>
76 namespace JSC { namespace DFG {
78 namespace DFGByteCodeParserInternal {
80 static const bool verbose = false;
82 static const bool verbose = true;
84 } // namespace DFGByteCodeParserInternal
86 #define VERBOSE_LOG(...) do { \
87 if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
88 dataLog(__VA_ARGS__); \
91 // === ByteCodeParser ===
93 // This class is used to compile the dataflow graph from a CodeBlock.
94 class ByteCodeParser {
96 ByteCodeParser(Graph& graph)
98 , m_codeBlock(graph.m_codeBlock)
99 , m_profiledBlock(graph.m_profiledBlock)
103 , m_constantUndefined(graph.freeze(jsUndefined()))
104 , m_constantNull(graph.freeze(jsNull()))
105 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106 , m_constantOne(graph.freeze(jsNumber(1)))
107 , m_numArguments(m_codeBlock->numParameters())
108 , m_numLocals(m_codeBlock->numCalleeLocals())
109 , m_parameterSlots(0)
110 , m_numPassedVarArgs(0)
111 , m_inlineStackTop(0)
112 , m_currentInstruction(0)
113 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
115 ASSERT(m_profiledBlock);
118 // Parse a full CodeBlock of bytecode.
122 struct InlineStackEntry;
124 // Just parse from m_currentIndex to the end of the current CodeBlock.
125 void parseCodeBlock();
127 void ensureLocals(unsigned newNumLocals)
129 VERBOSE_LOG(" ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130 if (newNumLocals <= m_numLocals)
132 m_numLocals = newNumLocals;
133 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134 m_graph.block(i)->ensureLocals(newNumLocals);
137 // Helper for min and max.
138 template<typename ChecksFunctor>
139 bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
141 void refineStatically(CallLinkStatus&, Node* callTarget);
142 // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143 // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144 // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145 // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146 // than to move the right index all the way to the treatment of op_ret.
147 BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148 BasicBlock* allocateUntargetableBlock();
149 // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
150 void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
151 void addJumpTo(BasicBlock*);
152 void addJumpTo(unsigned bytecodeIndex);
153 // Handle calls. This resolves issues surrounding inlining and intrinsics.
154 enum Terminality { Terminal, NonTerminal };
155 Terminality handleCall(
156 VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158 SpeculatedType prediction);
159 template<typename CallOp>
160 Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161 template<typename CallOp>
162 Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
163 void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt);
164 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165 Node* getArgumentCount();
166 template<typename ChecksFunctor>
167 bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170 bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171 unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172 enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173 CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174 CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
175 template<typename ChecksFunctor>
176 void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178 template<typename ChecksFunctor>
179 bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
180 template<typename ChecksFunctor>
181 bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182 template<typename ChecksFunctor>
183 bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184 template<typename ChecksFunctor>
185 bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186 template<typename ChecksFunctor>
187 bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190 bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191 bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
193 template<typename Bytecode>
194 void handlePutByVal(Bytecode, unsigned instructionSize);
195 template <typename Bytecode>
196 void handlePutAccessorById(NodeType, Bytecode);
197 template <typename Bytecode>
198 void handlePutAccessorByVal(NodeType, Bytecode);
199 template <typename Bytecode>
200 void handleNewFunc(NodeType, Bytecode);
201 template <typename Bytecode>
202 void handleNewFuncExp(NodeType, Bytecode);
204 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206 ObjectPropertyCondition presenceLike(
207 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
209 // Attempt to watch the presence of a property. It will watch that the property is present in the same
210 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211 // Returns true if this all works out.
212 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
215 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216 template<typename VariantType>
217 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
219 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
221 template<typename Op>
222 void parseGetById(const Instruction*);
224 VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
226 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
228 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
229 bool isDirect, unsigned intructionSize);
231 // Either register a watchpoint or emit a check for this condition. Returns false if the
232 // condition no longer holds, and therefore no reasonable check can be emitted.
233 bool check(const ObjectPropertyCondition&);
235 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
237 // Either register a watchpoint or emit a check for this condition. It must be a Presence
238 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239 // Emits code for the loaded value that the condition guards, and returns a node containing
240 // the loaded value. Returns null if the condition no longer holds.
241 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
245 // Calls check() for each condition in the set: that is, it either emits checks or registers
246 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247 // conditions are no longer checkable, returns false.
248 bool check(const ObjectPropertyConditionSet&);
250 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251 // base. Does a combination of watchpoint registration and check emission to guard the
252 // conditions, and emits code to load the value from the slot base. Returns a node containing
253 // the loaded value. Returns null if any of the conditions were no longer checkable.
254 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
257 void prepareToParseBlock();
260 // Parse a single basic block of bytecode instructions.
261 void parseBlock(unsigned limit);
262 // Link block successors.
263 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264 void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
266 VariableAccessData* newVariableAccessData(VirtualRegister operand)
268 ASSERT(!operand.isConstant());
270 m_graph.m_variableAccessData.append(operand);
271 return &m_graph.m_variableAccessData.last();
274 // Get/Set the operands/result of a bytecode instruction.
275 Node* getDirect(VirtualRegister operand)
277 ASSERT(!operand.isConstant());
279 // Is this an argument?
280 if (operand.isArgument())
281 return getArgument(operand);
284 return getLocal(operand);
287 Node* get(VirtualRegister operand)
289 if (operand.isConstant()) {
290 unsigned constantIndex = operand.toConstantIndex();
291 unsigned oldSize = m_constants.size();
292 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294 JSValue value = codeBlock.getConstant(operand.offset());
295 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296 if (constantIndex >= oldSize) {
297 m_constants.grow(constantIndex + 1);
298 for (unsigned i = oldSize; i < m_constants.size(); ++i)
299 m_constants[i] = nullptr;
302 Node* constantNode = nullptr;
303 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
306 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307 m_constants[constantIndex] = constantNode;
309 ASSERT(m_constants[constantIndex]);
310 return m_constants[constantIndex];
313 if (inlineCallFrame()) {
314 if (!inlineCallFrame()->isClosureCall) {
315 JSFunction* callee = inlineCallFrame()->calleeConstant();
316 if (operand.offset() == CallFrameSlot::callee)
317 return weakJSConstant(callee);
319 } else if (operand.offset() == CallFrameSlot::callee) {
320 // We have to do some constant-folding here because this enables CreateThis folding. Note
321 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322 // case if the function is a singleton then we already know it.
323 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324 if (JSFunction* function = executable->singleton().inferredValue()) {
325 m_graph.watchpoints().addLazily(executable);
326 return weakJSConstant(function);
329 return addToGraph(GetCallee);
332 return getDirect(m_inlineStackTop->remapOperand(operand));
336 // A normal set which follows a two-phase commit that spans code origins. During
337 // the current code origin it issues a MovHint, and at the start of the next
338 // code origin there will be a SetLocal. If the local needs flushing, the second
339 // SetLocal will be preceded with a Flush.
342 // A set where the SetLocal happens immediately and there is still a Flush. This
343 // is relevant when assigning to a local in tricky situations for the delayed
344 // SetLocal logic but where we know that we have not performed any side effects
345 // within this code origin. This is a safe replacement for NormalSet anytime we
346 // know that we have not yet performed side effects in this code origin.
347 ImmediateSetWithFlush,
349 // A set where the SetLocal happens immediately and we do not Flush it even if
350 // this is a local that is marked as needing it. This is relevant when
351 // initializing locals at the top of a function.
354 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
356 addToGraph(MovHint, OpInfo(operand.offset()), value);
358 // We can't exit anymore because our OSR exit state has changed.
361 DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
363 if (setMode == NormalSet) {
364 m_setLocalQueue.append(delayed);
368 return delayed.execute(this);
371 void processSetLocalQueue()
373 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
374 m_setLocalQueue[i].execute(this);
375 m_setLocalQueue.shrink(0);
378 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
380 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
383 Node* injectLazyOperandSpeculation(Node* node)
385 ASSERT(node->op() == GetLocal);
386 ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
387 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
388 LazyOperandValueProfileKey key(m_currentIndex, node->local());
389 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
390 node->variableAccessData()->predict(prediction);
394 // Used in implementing get/set, above, where the operand is a local variable.
395 Node* getLocal(VirtualRegister operand)
397 unsigned local = operand.toLocal();
399 Node* node = m_currentBlock->variablesAtTail.local(local);
401 // This has two goals: 1) link together variable access datas, and 2)
402 // try to avoid creating redundant GetLocals. (1) is required for
403 // correctness - no other phase will ensure that block-local variable
404 // access data unification is done correctly. (2) is purely opportunistic
405 // and is meant as an compile-time optimization only.
407 VariableAccessData* variable;
410 variable = node->variableAccessData();
412 switch (node->op()) {
416 return node->child1().node();
421 variable = newVariableAccessData(operand);
423 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
424 m_currentBlock->variablesAtTail.local(local) = node;
427 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
429 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
431 unsigned local = operand.toLocal();
433 if (setMode != ImmediateNakedSet) {
434 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
435 if (argumentPosition)
436 flushDirect(operand, argumentPosition);
437 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
441 VariableAccessData* variableAccessData = newVariableAccessData(operand);
442 variableAccessData->mergeStructureCheckHoistingFailed(
443 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
444 variableAccessData->mergeCheckArrayHoistingFailed(
445 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
446 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
447 m_currentBlock->variablesAtTail.local(local) = node;
451 // Used in implementing get/set, above, where the operand is an argument.
452 Node* getArgument(VirtualRegister operand)
454 unsigned argument = operand.toArgument();
455 ASSERT(argument < m_numArguments);
457 Node* node = m_currentBlock->variablesAtTail.argument(argument);
459 VariableAccessData* variable;
462 variable = node->variableAccessData();
464 switch (node->op()) {
468 return node->child1().node();
473 variable = newVariableAccessData(operand);
475 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
476 m_currentBlock->variablesAtTail.argument(argument) = node;
479 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
481 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
483 unsigned argument = operand.toArgument();
484 ASSERT(argument < m_numArguments);
486 VariableAccessData* variableAccessData = newVariableAccessData(operand);
488 // Always flush arguments, except for 'this'. If 'this' is created by us,
489 // then make sure that it's never unboxed.
490 if (argument || m_graph.needsFlushedThis()) {
491 if (setMode != ImmediateNakedSet)
492 flushDirect(operand);
495 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
496 variableAccessData->mergeShouldNeverUnbox(true);
498 variableAccessData->mergeStructureCheckHoistingFailed(
499 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
500 variableAccessData->mergeCheckArrayHoistingFailed(
501 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
502 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
503 m_currentBlock->variablesAtTail.argument(argument) = node;
507 ArgumentPosition* findArgumentPositionForArgument(int argument)
509 InlineStackEntry* stack = m_inlineStackTop;
510 while (stack->m_inlineCallFrame)
511 stack = stack->m_caller;
512 return stack->m_argumentPositions[argument];
515 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
517 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
518 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
519 if (!inlineCallFrame)
521 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
523 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
525 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
526 return stack->m_argumentPositions[argument];
531 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
533 if (operand.isArgument())
534 return findArgumentPositionForArgument(operand.toArgument());
535 return findArgumentPositionForLocal(operand);
538 template<typename AddFlushDirectFunc>
539 void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
542 if (inlineCallFrame) {
543 ASSERT(!m_graph.hasDebuggerEnabled());
544 numArguments = inlineCallFrame->argumentsWithFixup.size();
545 if (inlineCallFrame->isClosureCall)
546 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
547 if (inlineCallFrame->isVarargs())
548 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
550 numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
552 for (unsigned argument = numArguments; argument--;)
553 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
555 if (m_graph.needsScopeRegister())
556 addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
559 template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
560 void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
562 origin.walkUpInlineStack(
563 [&] (CodeOrigin origin) {
564 unsigned bytecodeIndex = origin.bytecodeIndex();
565 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
566 flushImpl(inlineCallFrame, addFlushDirect);
568 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
569 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
570 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
572 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
573 if (livenessAtBytecode[local])
574 addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
579 void flush(VirtualRegister operand)
581 flushDirect(m_inlineStackTop->remapOperand(operand));
584 void flushDirect(VirtualRegister operand)
586 flushDirect(operand, findArgumentPosition(operand));
589 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
591 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
594 template<NodeType nodeType>
595 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
597 ASSERT(!operand.isConstant());
599 Node* node = m_currentBlock->variablesAtTail.operand(operand);
601 VariableAccessData* variable;
604 variable = node->variableAccessData();
606 variable = newVariableAccessData(operand);
608 node = addToGraph(nodeType, OpInfo(variable));
609 m_currentBlock->variablesAtTail.operand(operand) = node;
610 if (argumentPosition)
611 argumentPosition->addVariable(variable);
614 void phantomLocalDirect(VirtualRegister operand)
616 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
619 void flush(InlineStackEntry* inlineStackEntry)
621 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
622 flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
625 void flushForTerminal()
627 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
628 auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
629 flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
632 void flushForReturn()
634 flush(m_inlineStackTop);
637 void flushIfTerminal(SwitchData& data)
639 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
642 for (unsigned i = data.cases.size(); i--;) {
643 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
650 // Assumes that the constant should be strongly marked.
651 Node* jsConstant(JSValue constantValue)
653 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
656 Node* weakJSConstant(JSValue constantValue)
658 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
661 // Helper functions to get/set the this value.
664 return get(m_inlineStackTop->m_codeBlock->thisRegister());
667 void setThis(Node* value)
669 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
672 InlineCallFrame* inlineCallFrame()
674 return m_inlineStackTop->m_inlineCallFrame;
677 bool allInlineFramesAreTailCalls()
679 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
682 CodeOrigin currentCodeOrigin()
684 return CodeOrigin(m_currentIndex, inlineCallFrame());
687 NodeOrigin currentNodeOrigin()
692 if (m_currentSemanticOrigin.isSet())
693 semantic = m_currentSemanticOrigin;
695 semantic = currentCodeOrigin();
697 forExit = currentCodeOrigin();
699 return NodeOrigin(semantic, forExit, m_exitOK);
702 BranchData* branchData(unsigned taken, unsigned notTaken)
704 // We assume that branches originating from bytecode always have a fall-through. We
705 // use this assumption to avoid checking for the creation of terminal blocks.
706 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
707 BranchData* data = m_graph.m_branchData.add();
708 *data = BranchData::withBytecodeIndices(taken, notTaken);
712 Node* addToGraph(Node* node)
714 VERBOSE_LOG(" appended ", node, " ", Graph::opName(node->op()), "\n");
716 m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
718 m_currentBlock->append(node);
719 if (clobbersExitState(m_graph, node))
724 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
726 Node* result = m_graph.addNode(
727 op, currentNodeOrigin(), Edge(child1), Edge(child2),
729 return addToGraph(result);
731 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
733 Node* result = m_graph.addNode(
734 op, currentNodeOrigin(), child1, child2, child3);
735 return addToGraph(result);
737 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
739 Node* result = m_graph.addNode(
740 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
742 return addToGraph(result);
744 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
746 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
747 return addToGraph(result);
749 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
751 Node* result = m_graph.addNode(
752 op, currentNodeOrigin(), info1, info2,
753 Edge(child1), Edge(child2), Edge(child3));
754 return addToGraph(result);
756 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
758 Node* result = m_graph.addNode(
759 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
760 return addToGraph(result);
763 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
765 Node* result = m_graph.addNode(
766 Node::VarArg, op, currentNodeOrigin(), info1, info2,
767 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
770 m_numPassedVarArgs = 0;
775 void addVarArgChild(Node* child)
777 m_graph.m_varArgChildren.append(Edge(child));
778 m_numPassedVarArgs++;
781 void addVarArgChild(Edge child)
783 m_graph.m_varArgChildren.append(child);
784 m_numPassedVarArgs++;
787 Node* addCallWithoutSettingResult(
788 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
791 addVarArgChild(callee);
792 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
794 if (parameterSlots > m_parameterSlots)
795 m_parameterSlots = parameterSlots;
797 for (int i = 0; i < argCount; ++i)
798 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
800 return addToGraph(Node::VarArg, op, opInfo, prediction);
804 VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
805 SpeculatedType prediction)
807 if (op == TailCall) {
808 if (allInlineFramesAreTailCalls())
809 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
810 op = TailCallInlinedCaller;
814 Node* call = addCallWithoutSettingResult(
815 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
816 if (result.isValid())
821 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
823 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
824 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
825 // object's structure as soon as we make it a weakJSCosntant.
826 Node* objectNode = weakJSConstant(object);
827 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
831 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
833 auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
835 SpeculatedType prediction;
837 ConcurrentJSLocker locker(codeBlock->m_lock);
838 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
840 auto* fuzzerAgent = m_vm->fuzzerAgent();
841 if (UNLIKELY(fuzzerAgent))
842 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
846 SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
847 if (prediction != SpecNone)
850 // If we have no information about the values this
851 // node generates, we check if by any chance it is
852 // a tail call opcode. In that case, we walk up the
853 // inline frames to find a call higher in the call
854 // chain and use its prediction. If we only have
855 // inlined tail call frames, we use SpecFullTop
856 // to avoid a spurious OSR exit.
857 auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
858 OpcodeID opcodeID = instruction->opcodeID();
862 case op_tail_call_varargs:
863 case op_tail_call_forward_arguments: {
864 // Things should be more permissive to us returning BOTTOM instead of TOP here.
865 // Currently, this will cause us to Force OSR exit. This is bad because returning
866 // TOP will cause anything that transitively touches this speculated type to
867 // also become TOP during prediction propagation.
868 // https://bugs.webkit.org/show_bug.cgi?id=164337
869 if (!inlineCallFrame())
872 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
876 InlineStackEntry* stack = m_inlineStackTop;
877 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
878 stack = stack->m_caller;
880 return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
887 RELEASE_ASSERT_NOT_REACHED();
891 SpeculatedType getPrediction(unsigned bytecodeIndex)
893 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
895 if (prediction == SpecNone) {
896 // We have no information about what values this node generates. Give up
897 // on executing this code, since we're likely to do more damage than good.
898 addToGraph(ForceOSRExit);
904 SpeculatedType getPredictionWithoutOSRExit()
906 return getPredictionWithoutOSRExit(m_currentIndex);
909 SpeculatedType getPrediction()
911 return getPrediction(m_currentIndex);
914 ArrayMode getArrayMode(Array::Action action)
916 CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
917 ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
918 return getArrayMode(*profile, action);
921 ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
923 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
924 profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
925 bool makeSafe = profile.outOfBounds(locker);
926 return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
929 Node* makeSafe(Node* node)
931 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
932 node->mergeFlags(NodeMayOverflowInt32InDFG);
933 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
934 node->mergeFlags(NodeMayNegZeroInDFG);
936 if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
940 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
942 switch (node->op()) {
946 if (arithProfile->didObserveDouble())
947 node->mergeFlags(NodeMayHaveDoubleResult);
948 if (arithProfile->didObserveNonNumeric())
949 node->mergeFlags(NodeMayHaveNonNumericResult);
950 if (arithProfile->didObserveBigInt())
951 node->mergeFlags(NodeMayHaveBigIntResult);
956 if (arithProfile->didObserveInt52Overflow())
957 node->mergeFlags(NodeMayOverflowInt52);
958 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
959 node->mergeFlags(NodeMayOverflowInt32InBaseline);
960 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
961 node->mergeFlags(NodeMayNegZeroInBaseline);
962 if (arithProfile->didObserveDouble())
963 node->mergeFlags(NodeMayHaveDoubleResult);
964 if (arithProfile->didObserveNonNumeric())
965 node->mergeFlags(NodeMayHaveNonNumericResult);
966 if (arithProfile->didObserveBigInt())
967 node->mergeFlags(NodeMayHaveBigIntResult);
972 if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
973 node->mergeFlags(NodeMayHaveDoubleResult);
974 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
975 node->mergeFlags(NodeMayNegZeroInBaseline);
976 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
977 node->mergeFlags(NodeMayOverflowInt32InBaseline);
978 if (arithProfile->didObserveNonNumeric())
979 node->mergeFlags(NodeMayHaveNonNumericResult);
980 if (arithProfile->didObserveBigInt())
981 node->mergeFlags(NodeMayHaveBigIntResult);
991 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
992 switch (node->op()) {
998 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
999 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1010 Node* makeDivSafe(Node* node)
1012 ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1014 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1015 node->mergeFlags(NodeMayOverflowInt32InDFG);
1016 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1017 node->mergeFlags(NodeMayNegZeroInDFG);
1019 // The main slow case counter for op_div in the old JIT counts only when
1020 // the operands are not numbers. We don't care about that since we already
1021 // have speculations in place that take care of that separately. We only
1022 // care about when the outcome of the division is not an integer, which
1023 // is what the special fast case counter tells us.
1025 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1028 // FIXME: It might be possible to make this more granular.
1029 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1031 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1032 if (arithProfile->didObserveBigInt())
1033 node->mergeFlags(NodeMayHaveBigIntResult);
1038 void noticeArgumentsUse()
1040 // All of the arguments in this function need to be formatted as JSValues because we will
1041 // load from them in a random-access fashion and we don't want to have to switch on
1044 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1045 argument->mergeShouldNeverUnbox(true);
1048 bool needsDynamicLookup(ResolveType, OpcodeID);
1051 CodeBlock* m_codeBlock;
1052 CodeBlock* m_profiledBlock;
1055 // The current block being generated.
1056 BasicBlock* m_currentBlock;
1057 // The bytecode index of the current instruction being generated.
1058 unsigned m_currentIndex;
1059 // The semantic origin of the current node if different from the current Index.
1060 CodeOrigin m_currentSemanticOrigin;
1061 // True if it's OK to OSR exit right now.
1062 bool m_exitOK { false };
1064 FrozenValue* m_constantUndefined;
1065 FrozenValue* m_constantNull;
1066 FrozenValue* m_constantNaN;
1067 FrozenValue* m_constantOne;
1068 Vector<Node*, 16> m_constants;
1070 HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1072 // The number of arguments passed to the function.
1073 unsigned m_numArguments;
1074 // The number of locals (vars + temporaries) used in the function.
1075 unsigned m_numLocals;
1076 // The number of slots (in units of sizeof(Register)) that we need to
1077 // preallocate for arguments to outgoing calls from this frame. This
1078 // number includes the CallFrame slots that we initialize for the callee
1079 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1080 // This number is 0 if and only if this function is a leaf.
1081 unsigned m_parameterSlots;
1082 // The number of var args passed to the next var arg node.
1083 unsigned m_numPassedVarArgs;
1085 struct InlineStackEntry {
1086 ByteCodeParser* m_byteCodeParser;
1088 CodeBlock* m_codeBlock;
1089 CodeBlock* m_profiledBlock;
1090 InlineCallFrame* m_inlineCallFrame;
1092 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1094 QueryableExitProfile m_exitProfile;
1096 // Remapping of identifier and constant numbers from the code block being
1097 // inlined (inline callee) to the code block that we're inlining into
1098 // (the machine code block, which is the transitive, though not necessarily
1100 Vector<unsigned> m_identifierRemap;
1101 Vector<unsigned> m_switchRemap;
1103 // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1104 // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1105 Vector<BasicBlock*> m_unlinkedBlocks;
1107 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1108 // cannot have two blocks that have the same bytecodeBegin.
1109 Vector<BasicBlock*> m_blockLinkingTargets;
1111 // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
1112 BasicBlock* m_continuationBlock;
1114 VirtualRegister m_returnValue;
1116 // Speculations about variable types collected from the profiled code block,
1117 // which are based on OSR exit profiles that past DFG compilations of this
1118 // code block had gathered.
1119 LazyOperandValueProfileParser m_lazyOperands;
1121 ICStatusMap m_baselineMap;
1122 ICStatusContext m_optimizedContext;
1124 // Pointers to the argument position trackers for this slice of code.
1125 Vector<ArgumentPosition*> m_argumentPositions;
1127 InlineStackEntry* m_caller;
1132 CodeBlock* profiledBlock,
1133 JSFunction* callee, // Null if this is a closure call.
1134 VirtualRegister returnValueVR,
1135 VirtualRegister inlineCallFrameStart,
1136 int argumentCountIncludingThis,
1137 InlineCallFrame::Kind,
1138 BasicBlock* continuationBlock);
1140 ~InlineStackEntry();
1142 VirtualRegister remapOperand(VirtualRegister operand) const
1144 if (!m_inlineCallFrame)
1147 ASSERT(!operand.isConstant());
1149 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1153 InlineStackEntry* m_inlineStackTop;
1155 ICStatusContextStack m_icContextStack;
1157 struct DelayedSetLocal {
1158 CodeOrigin m_origin;
1159 VirtualRegister m_operand;
1163 DelayedSetLocal() { }
1164 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1166 , m_operand(operand)
1168 , m_setMode(setMode)
1170 RELEASE_ASSERT(operand.isValid());
1173 Node* execute(ByteCodeParser* parser)
1175 if (m_operand.isArgument())
1176 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1177 return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1181 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1183 const Instruction* m_currentInstruction;
1184 bool m_hasDebuggerEnabled;
1185 bool m_hasAnyForceOSRExits { false };
1188 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1190 ASSERT(bytecodeIndex != UINT_MAX);
1191 Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1192 BasicBlock* blockPtr = block.ptr();
1193 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1194 if (m_inlineStackTop->m_blockLinkingTargets.size())
1195 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1196 m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1197 m_graph.appendBlock(WTFMove(block));
1201 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1203 Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1204 BasicBlock* blockPtr = block.ptr();
1205 m_graph.appendBlock(WTFMove(block));
1209 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1211 RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1212 block->bytecodeBegin = bytecodeIndex;
1213 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1214 if (m_inlineStackTop->m_blockLinkingTargets.size())
1215 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1216 m_inlineStackTop->m_blockLinkingTargets.append(block);
1219 void ByteCodeParser::addJumpTo(BasicBlock* block)
1221 ASSERT(!m_currentBlock->terminal());
1222 Node* jumpNode = addToGraph(Jump);
1223 jumpNode->targetBlock() = block;
1224 m_currentBlock->didLink();
1227 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1229 ASSERT(!m_currentBlock->terminal());
1230 addToGraph(Jump, OpInfo(bytecodeIndex));
1231 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1234 template<typename CallOp>
1235 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1237 auto bytecode = pc->as<CallOp>();
1238 Node* callTarget = get(bytecode.m_callee);
1239 int registerOffset = -static_cast<int>(bytecode.m_argv);
1241 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1242 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1243 m_inlineStackTop->m_baselineMap, m_icContextStack);
1245 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1247 return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1248 bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1251 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1253 if (callTarget->isCellConstant())
1254 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1257 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1258 VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1259 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1260 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1262 ASSERT(registerOffset <= 0);
1264 refineStatically(callLinkStatus, callTarget);
1266 VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1268 // If we have profiling information about this call, and it did not behave too polymorphically,
1269 // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1270 if (callLinkStatus.canOptimize()) {
1271 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1273 VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1274 auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1275 argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1276 if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1278 if (optimizationResult == CallOptimizationResult::Inlined) {
1279 if (UNLIKELY(m_graph.compilation()))
1280 m_graph.compilation()->noticeInlinedCall();
1285 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1286 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1287 return callNode->op() == TailCall ? Terminal : NonTerminal;
1290 template<typename CallOp>
1291 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1293 auto bytecode = pc->as<CallOp>();
1294 int firstFreeReg = bytecode.m_firstFree.offset();
1295 int firstVarArgOffset = bytecode.m_firstVarArg;
1297 SpeculatedType prediction = getPrediction();
1299 Node* callTarget = get(bytecode.m_callee);
1301 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1302 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1303 m_inlineStackTop->m_baselineMap, m_icContextStack);
1304 refineStatically(callLinkStatus, callTarget);
1306 VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1308 if (callLinkStatus.canOptimize()) {
1309 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1311 if (handleVarargsInlining(callTarget, bytecode.m_dst,
1312 callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1313 firstVarArgOffset, op,
1314 InlineCallFrame::varargsKindFor(callMode))) {
1315 if (UNLIKELY(m_graph.compilation()))
1316 m_graph.compilation()->noticeInlinedCall();
1321 CallVarargsData* data = m_graph.m_callVarargsData.add();
1322 data->firstVarArgOffset = firstVarArgOffset;
1324 Node* thisChild = get(bytecode.m_thisValue);
1325 Node* argumentsChild = nullptr;
1326 if (op != TailCallForwardVarargs)
1327 argumentsChild = get(bytecode.m_arguments);
1329 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1330 if (allInlineFramesAreTailCalls()) {
1331 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1334 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1337 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1338 if (bytecode.m_dst.isValid())
1339 set(bytecode.m_dst, call);
1343 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1346 if (thisArgumentReg.isValid())
1347 thisArgument = get(thisArgumentReg);
1349 thisArgument = nullptr;
1352 Node* callTargetForCheck;
1353 if (callee.isClosureCall()) {
1354 calleeCell = callee.executable();
1355 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1357 calleeCell = callee.nonExecutableCallee();
1358 callTargetForCheck = callTarget;
1362 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1364 addToGraph(Phantom, thisArgument);
1367 Node* ByteCodeParser::getArgumentCount()
1369 Node* argumentCount;
1370 if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1371 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1373 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1374 return argumentCount;
1377 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1379 for (int i = 0; i < argumentCountIncludingThis; ++i)
1380 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1383 template<typename ChecksFunctor>
1384 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1386 if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1389 auto targetExecutable = callVariant.executable();
1390 InlineStackEntry* stackEntry = m_inlineStackTop;
1392 if (targetExecutable != stackEntry->executable())
1394 VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
1396 if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1397 // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1398 // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1399 if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1402 // We are in the machine code entry (i.e. the original caller).
1403 // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1404 if (argumentCountIncludingThis > m_codeBlock->numParameters())
1408 // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1409 // Check if this is the same callee that we try to inline here.
1410 if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1411 if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1415 // We must add some check that the profiling information was correct and the target of this call is what we thought.
1416 emitFunctionCheckIfNeeded();
1417 // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1420 // We must set the callee to the right value
1421 if (stackEntry->m_inlineCallFrame) {
1422 if (stackEntry->m_inlineCallFrame->isClosureCall)
1423 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1425 addToGraph(SetCallee, callTargetNode);
1427 // We must set the arguments to the right values
1428 if (!stackEntry->m_inlineCallFrame)
1429 addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1431 for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1432 Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1433 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1435 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1436 for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1437 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1439 // We must repeat the work of op_enter here as we will jump right after it.
1440 // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1441 for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1442 setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1444 // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1445 unsigned oldIndex = m_currentIndex;
1446 auto oldStackTop = m_inlineStackTop;
1447 m_inlineStackTop = stackEntry;
1448 m_currentIndex = opcodeLengths[op_enter];
1450 processSetLocalQueue();
1451 m_currentIndex = oldIndex;
1452 m_inlineStackTop = oldStackTop;
1455 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1456 RELEASE_ASSERT(entryBlockPtr);
1457 addJumpTo(*entryBlockPtr);
1459 // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1460 } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1462 // The tail call was not recursive
1466 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1468 CallMode callMode = InlineCallFrame::callModeFor(kind);
1469 CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1470 VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1472 if (m_hasDebuggerEnabled) {
1473 VERBOSE_LOG(" Failing because the debugger is in use.\n");
1477 FunctionExecutable* executable = callee.functionExecutable();
1479 VERBOSE_LOG(" Failing because there is no function executable.\n");
1483 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1484 // being an inline candidate? We might not have a code block (1) if code was thrown away,
1485 // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
1486 // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
1487 // to inline it if we had a static proof of what was being called; this might happen for example
1488 // if you call a global function, where watchpointing gives us static information. Overall,
1489 // it's a rare case because we expect that any hot callees would have already been compiled.
1490 CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1492 VERBOSE_LOG(" Failing because no code block available.\n");
1496 if (!Options::useArityFixupInlining()) {
1497 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1498 VERBOSE_LOG(" Failing because of arity mismatch.\n");
1503 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1504 codeBlock, specializationKind, callee.isClosureCall());
1505 VERBOSE_LOG(" Call mode: ", callMode, "\n");
1506 VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
1507 VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
1508 VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1509 VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1510 VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1511 VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1512 if (!canInline(capabilityLevel)) {
1513 VERBOSE_LOG(" Failing because the function is not inlineable.\n");
1517 // Check if the caller is already too large. We do this check here because that's just
1518 // where we happen to also have the callee's code block, and we want that for the
1519 // purpose of unsetting SABI.
1520 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1521 codeBlock->m_shouldAlwaysBeInlined = false;
1522 VERBOSE_LOG(" Failing because the caller is too large.\n");
1526 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1528 // https://bugs.webkit.org/show_bug.cgi?id=127627
1530 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1531 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1532 // haven't gotten to Baseline yet. Consider not inlining these functions.
1533 // https://bugs.webkit.org/show_bug.cgi?id=145503
1535 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1536 // too many levels? If either of these is detected, then don't inline. We adjust our
1537 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1540 unsigned recursion = 0;
1542 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1544 if (depth >= Options::maximumInliningDepth()) {
1545 VERBOSE_LOG(" Failing because depth exceeded.\n");
1549 if (entry->executable() == executable) {
1551 if (recursion >= Options::maximumInliningRecursion()) {
1552 VERBOSE_LOG(" Failing because recursion detected.\n");
1558 VERBOSE_LOG(" Inlining should be possible.\n");
1560 // It might be possible to inline.
1561 return codeBlock->bytecodeCost();
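    // The shape of the walk above, as a sketch with hypothetical limits standing in for
    // Options::maximumInliningDepth() and Options::maximumInliningRecursion():
#if 0
    auto withinInliningLimits = [] (InlineStackEntry* top, FunctionExecutable* calleeExecutable) {
        unsigned depth = 0;
        unsigned recursion = 0;
        for (InlineStackEntry* entry = top; entry; entry = entry->m_caller) {
            if (++depth >= 5) // hypothetical maximumInliningDepth
                return false;
            if (entry->executable() == calleeExecutable && ++recursion >= 2) // hypothetical maximumInliningRecursion
                return false;
        }
        return true;
    };
#endif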
1564 template<typename ChecksFunctor>
1565 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1567 const Instruction* savedCurrentInstruction = m_currentInstruction;
1568 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1570 ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1572 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1573 insertChecks(codeBlock);
1575 // FIXME: Don't flush constants!
1577 // arityFixupCount and numberOfStackPaddingSlots are different. While arityFixupCount does not take stack alignment into account,
1578 // numberOfStackPaddingSlots does. Consider the following case:
1580 // before: [ ... ][arg0][header]
1581 // after: [ ... ][ext ][arg1][arg0][header]
1583 // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1584 // We insert extra slots to align the stack.
1585 int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1586 int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1587 ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1588 int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
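    // Rough intuition (the exact computation lives in CommonSlowPaths::numberOfStackPaddingSlots):
    // both the parameter frame and the argument frame are rounded up to a multiple of
    // stackAlignmentRegisters(), and the padding is the difference between those two aligned sizes.
    // That is why it can exceed arityFixupCount, as in the example above, or be zero when the
    // caller's existing alignment hole already provides the room (see the "extra slot" case below).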
1590 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1593 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1594 CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1596 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1598 if (result.isValid())
1599 result = m_inlineStackTop->remapOperand(result);
1601 VariableAccessData* calleeVariable = nullptr;
1602 if (callee.isClosureCall()) {
1603 Node* calleeSet = set(
1604 VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1606 calleeVariable = calleeSet->variableAccessData();
1607 calleeVariable->mergeShouldNeverUnbox(true);
1610 InlineStackEntry* callerStackTop = m_inlineStackTop;
1611 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1612 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1614 // This is where the actual inlining really happens.
1615 unsigned oldIndex = m_currentIndex;
1619 case InlineCallFrame::GetterCall:
1620 case InlineCallFrame::SetterCall: {
1621 // When inlining getter and setter calls, we set up a stack frame which does not appear in the bytecode.
1622 // Because inlining can switch on the executable, we could have a graph like this.
1627 // 31: MovHint(loc10)
1628 // 32: SetLocal(loc10)
1629 // 33: MovHint(loc9)
1630 // 34: SetLocal(loc9)
1632 // 37: GetExecutable(@30)
1637 // 42: GetLocal(loc12, bc#7 of caller)
1639 // --> callee: loc9 and loc10 are arguments of callee.
1641 // <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1643 // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1644 // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1645 // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
1646 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1647 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1648 Node* value = getDirect(argumentToGet);
1649 addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1650 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1658 if (arityFixupCount) {
1659 // Note: we do arity fixup in two phases:
1660 // 1. We get all the values we need and MovHint them to the expected locals.
1661 // 2. We SetLocal them after that. This way, if we exit, the callee's
1662 // frame is already set up. If any SetLocal exits, we have a valid exit state.
1663 // This is required because if we didn't do this in two phases, we may exit in
1664 // the middle of arity fixup from the callee's CodeOrigin. This is unsound because the code
1665 // we exit to does not perform arity fixup, so the remaining necessary fixups would never be executed.
1666 // For example, consider if we need to pad two args:
1667 // [arg3][arg2][arg1][arg0]
1668 // [fix ][fix ][arg3][arg2][arg1][arg0]
1669 // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1670 // for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1671 // [arg3][arg2][arg1][arg2][arg1][arg0]
1672 // Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1673 // And the callee would then just end up thinking its arguments are:
1674 // [fix ][fix ][arg3][arg2][arg1][arg0]
1675 // which is incorrect.
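    // Concretely, for the two-slot example above, phase 1 emits MovHints for the two padding
    // slots (undefined) and for every shifted argument before anything is stored, and only then
    // does phase 2 (the processSetLocalQueue() call below) emit the matching SetLocals, so an
    // exit at any individual SetLocal already knows where every value of the fixed-up frame lives.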
1677 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1678 // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the count of arguments is not aligned.
1679 // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Because this argument
1680 // count does not fulfill the stack alignment requirement, extra slots were already inserted.
1682 // before: [ ... ][ext ][arg1][arg0][header]
1684 // In the above case, one extra slot is inserted. If the callee's parameter count is 3, we will fix up the arguments.
1685 // At that time, we can simply reuse these extra slots. The fixed-up stack then looks like this:
1687 // before: [ ... ][ext ][arg1][arg0][header]
1688 // after: [ ... ][arg2][arg1][arg0][header]
1690 // In such cases, we do not need to move frames.
1691 if (registerOffsetAfterFixup != registerOffset) {
1692 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1693 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1694 Node* value = getDirect(argumentToGet);
1695 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1696 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1697 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1700 for (int index = 0; index < arityFixupCount; ++index) {
1701 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1702 addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1703 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1706 // At this point, it's OK to OSR exit because we finished setting up
1707 // our callee's frame. We emit an ExitOK below.
1710 // At this point, it's again OK to OSR exit.
1714 processSetLocalQueue();
1716 InlineVariableData inlineVariableData;
1717 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1718 inlineVariableData.argumentPositionStart = argumentPositionStart;
1719 inlineVariableData.calleeVariable = 0;
1722 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1723 == callee.isClosureCall());
1724 if (callee.isClosureCall()) {
1725 RELEASE_ASSERT(calleeVariable);
1726 inlineVariableData.calleeVariable = calleeVariable;
1729 m_graph.m_inlineVariableData.append(inlineVariableData);
1732 clearCaches(); // Reset our state now that we're back to the outer code.
1734 m_currentIndex = oldIndex;
1737 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1739 // Most functions have at least one op_ret and thus set up the continuation block.
1740 // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
1741 if (inlineStackEntry.m_continuationBlock)
1742 m_currentBlock = inlineStackEntry.m_continuationBlock;
1744 m_currentBlock = allocateUntargetableBlock();
1745 ASSERT(!m_currentBlock->terminal());
1747 prepareToParseBlock();
1748 m_currentInstruction = savedCurrentInstruction;
1751 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1753 VERBOSE_LOG(" Considering callee ", callee, "\n");
1755 bool didInsertChecks = false;
1756 auto insertChecksWithAccounting = [&] () {
1757 if (needsToCheckCallee)
1758 emitFunctionChecks(callee, callTargetNode, thisArgument);
1759 didInsertChecks = true;
1762 if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1763 RELEASE_ASSERT(didInsertChecks);
1764 return CallOptimizationResult::OptimizedToJump;
1766 RELEASE_ASSERT(!didInsertChecks);
1768 if (!inliningBalance)
1769 return CallOptimizationResult::DidNothing;
1771 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1773 auto endSpecialCase = [&] () {
1774 RELEASE_ASSERT(didInsertChecks);
1775 addToGraph(Phantom, callTargetNode);
1776 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1778 if (continuationBlock) {
1779 m_currentIndex = nextOffset;
1781 processSetLocalQueue();
1782 addJumpTo(continuationBlock);
1786 if (InternalFunction* function = callee.internalFunction()) {
1787 if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1789 return CallOptimizationResult::Inlined;
1791 RELEASE_ASSERT(!didInsertChecks);
1792 return CallOptimizationResult::DidNothing;
1795 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1796 if (intrinsic != NoIntrinsic) {
1797 if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1799 return CallOptimizationResult::Inlined;
1801 RELEASE_ASSERT(!didInsertChecks);
1802 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1805 if (Options::useDOMJIT()) {
1806 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1807 if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1809 return CallOptimizationResult::Inlined;
1811 RELEASE_ASSERT(!didInsertChecks);
1815 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1816 if (myInliningCost > inliningBalance)
1817 return CallOptimizationResult::DidNothing;
1819 auto insertCheck = [&] (CodeBlock*) {
1820 if (needsToCheckCallee)
1821 emitFunctionChecks(callee, callTargetNode, thisArgument);
1823 inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1824 inliningBalance -= myInliningCost;
1825 return CallOptimizationResult::Inlined;
1828 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1829 const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1830 VirtualRegister argumentsArgument, unsigned argumentsOffset,
1831 NodeType callOp, InlineCallFrame::Kind kind)
1833 VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1834 if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1835 VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1838 if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1839 VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1843 CallVariant callVariant = callLinkStatus[0];
1845 unsigned mandatoryMinimum;
1846 if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1847 mandatoryMinimum = functionExecutable->parameterCount();
1849 mandatoryMinimum = 0;
1852 unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
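    // For example, inlining `function f(a, b, c)` through a varargs call guarantees room for
    // mandatoryMinimum == 3 named parameters plus "this", so maxNumArguments is at least
    // mandatoryMinimum + 1 == 4 here even if profiling never saw that many arguments arrive.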
1854 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1855 if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1856 VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1860 int registerOffset = firstFreeReg + 1;
1861 registerOffset -= maxNumArguments; // includes "this"
1862 registerOffset -= CallFrame::headerSizeInRegisters;
1863 registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
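    // Worked example with hypothetical values (firstFreeReg == -19, maxNumArguments == 3,
    // CallFrame::headerSizeInRegisters == 5, stackAlignmentRegisters() == 2):
#if 0
    int registerOffset = -19 + 1; // firstFreeReg + 1                      -> -18
    registerOffset -= 3;          // maxNumArguments, including "this"     -> -21
    registerOffset -= 5;          // call frame header (hypothetical size) -> -26
    registerOffset = -WTF::roundUpToMultipleOf(2, -registerOffset); // already aligned: stays -26
#endif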
1865 Vector<VirtualRegister> setArgumentMaybes;
1867 auto insertChecks = [&] (CodeBlock* codeBlock) {
1868 emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1870 int remappedRegisterOffset =
1871 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1873 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1875 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1876 int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1878 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1879 data->start = VirtualRegister(remappedArgumentStart + 1);
1880 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1881 data->offset = argumentsOffset;
1882 data->limit = maxNumArguments;
1883 data->mandatoryMinimum = mandatoryMinimum;
1885 if (callOp == TailCallForwardVarargs)
1886 addToGraph(ForwardVarargs, OpInfo(data));
1888 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1890 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1891 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1892 // callTargetNode because the other 2 are still in use and alive at this point.
1893 addToGraph(Phantom, callTargetNode);
1895 // In DFG IR before SSA, we cannot insert control flow between the
1896 // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1897 // SSA. Fortunately, we also have other reasons for not inserting control flow
1900 VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1901 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1902 // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1903 // mostly just a formality.
1904 countVariable->predict(SpecInt32Only);
1905 countVariable->mergeIsProfitableToUnbox(true);
1906 Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1907 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1909 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1910 unsigned numSetArguments = 0;
1911 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1912 VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1913 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1915 // For a while it had been my intention to do things like this inside the
1916 // prediction injection phase. But in this case it's really best to do it here,
1917 // because it's here that we have access to the variable access datas for the
1918 // inlining we're about to do.
1920 // Something else that's interesting here is that we'd really love to get
1921 // predictions from the arguments loaded at the callsite, rather than the
1922 // arguments received inside the callee. But that probably won't matter for most
1924 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1925 ConcurrentJSLocker locker(codeBlock->m_lock);
1926 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1927 variable->predict(profile.computeUpdatedPrediction(locker));
1930 Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1931 if (numSetArguments >= mandatoryMinimum && Options::useMaximalFlushInsertionPhase())
1932 setArgumentMaybes.append(variable->local());
1933 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1938 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1939 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1940 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1941 // and there are no callsite value profiles, and native functions won't have callee value profiles for
1942 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1943 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1944 // calling LoadVarargs twice.
1945 inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1947 for (VirtualRegister reg : setArgumentMaybes)
1948 setDirect(reg, jsConstant(jsUndefined()), ImmediateNakedSet);
1950 VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1954 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1956 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1957 if (specializationKind == CodeForConstruct)
1958 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1959 if (callLinkStatus.isClosureCall())
1960 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1961 return inliningBalance;
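    // Usage sketch with hypothetical limits (the real numbers come from Options and change over
    // time): a closure-call construct would be capped by the smallest of the three limits,
#if 0
    unsigned balance = std::min({ 120u /* call */, 100u /* construct */, 100u /* closure call */ });
#endif
    // and handleCallVariant() then subtracts each inlined callee's bytecodeCost() from whatever
    // balance remains, so later callees in a polymorphic switch get less room.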
1964 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1965 Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1966 int registerOffset, VirtualRegister thisArgument,
1967 int argumentCountIncludingThis,
1968 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1970 VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1972 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1973 unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1975 // First check if we can avoid creating control flow. Our inliner does some CFG
1976 // simplification on the fly and this helps reduce compile times, but we can only leverage
1977 // this in cases where we don't need control flow diamonds to check the callee.
1978 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1979 return handleCallVariant(
1980 callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1981 argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1984 // We need to create some kind of switch over callee. For now we only do this if we believe that
1985 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1986 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1987 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1988 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1990 if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1991 VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1992 return CallOptimizationResult::DidNothing;
1995 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1996 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1998 if (!Options::usePolymorphicCallInliningForNonStubStatus()
1999 && !callLinkStatus.isBasedOnStub()) {
2000 VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
2001 return CallOptimizationResult::DidNothing;
2004 bool allAreClosureCalls = true;
2005 bool allAreDirectCalls = true;
2006 for (unsigned i = callLinkStatus.size(); i--;) {
2007 if (callLinkStatus[i].isClosureCall())
2008 allAreDirectCalls = false;
2010 allAreClosureCalls = false;
2013 Node* thingToSwitchOn;
2014 if (allAreDirectCalls)
2015 thingToSwitchOn = callTargetNode;
2016 else if (allAreClosureCalls)
2017 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2019 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2020 // where it would be beneficial. It might be best to handle these cases as if all calls were
2022 // https://bugs.webkit.org/show_bug.cgi?id=136020
2023 VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2024 return CallOptimizationResult::DidNothing;
2027 VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2029 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2030 // store the callee so that it will be accessible to all of the blocks we're about to create. We
2031 // get away with doing an immediate-set here because we wouldn't have performed any side effects
2033 VERBOSE_LOG("Register offset: ", registerOffset);
2034 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2035 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2036 VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2037 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2039 // It's OK to exit right now, even though we set some locals. That's because those locals are not
2044 SwitchData& data = *m_graph.m_switchData.add();
2045 data.kind = SwitchCell;
2046 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2047 m_currentBlock->didLink();
2049 BasicBlock* continuationBlock = allocateUntargetableBlock();
2050 VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2052 // We may force this true if we give up on inlining any of the edges.
2053 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2055 VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2057 unsigned oldOffset = m_currentIndex;
2058 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2059 m_currentIndex = oldOffset;
2060 BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2061 m_currentBlock = calleeEntryBlock;
2062 prepareToParseBlock();
2064 // At the top of each switch case, we can exit.
2067 Node* myCallTargetNode = getDirect(calleeReg);
2069 auto inliningResult = handleCallVariant(
2070 myCallTargetNode, result, callLinkStatus[i], registerOffset,
2071 thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2072 inliningBalance, continuationBlock, false);
2074 if (inliningResult == CallOptimizationResult::DidNothing) {
2075 // That failed so we let the block die. Nothing interesting should have been added to
2076 // the block. We also give up on inlining any of the (less frequent) callees.
2077 ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2078 m_graph.killBlockAndItsContents(m_currentBlock);
2079 m_graph.m_blocks.removeLast();
2080 VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2082 // The fact that inlining failed means we need a slow path.
2083 couldTakeSlowPath = true;
2087 JSCell* thingToCaseOn;
2088 if (allAreDirectCalls)
2089 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2091 ASSERT(allAreClosureCalls);
2092 thingToCaseOn = callLinkStatus[i].executable();
2094 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2095 VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2099 m_currentBlock = allocateUntargetableBlock();
2100 m_currentIndex = oldOffset;
2102 data.fallThrough = BranchTarget(m_currentBlock);
2103 prepareToParseBlock();
2104 Node* myCallTargetNode = getDirect(calleeReg);
2105 if (couldTakeSlowPath) {
2107 result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2108 registerOffset, prediction);
2109 VERBOSE_LOG("We added a call in the slow path\n");
2111 addToGraph(CheckBadCell);
2112 addToGraph(Phantom, myCallTargetNode);
2113 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2115 set(result, addToGraph(BottomValue));
2116 VERBOSE_LOG("couldTakeSlowPath was false\n");
2119 m_currentIndex = nextOffset;
2120 m_exitOK = true; // Origin changed, so it's fine to exit again.
2121 processSetLocalQueue();
2123 if (Node* terminal = m_currentBlock->terminal())
2124 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2126 addJumpTo(continuationBlock);
2129 prepareToParseBlock();
2131 m_currentIndex = oldOffset;
2132 m_currentBlock = continuationBlock;
2135 VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2136 return CallOptimizationResult::Inlined;
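    // The control flow built above, roughly:
    //
    //                 Switch (callee cell or GetExecutable(callee))
    //                /            |               \              \
    //          case #1         case #2    ...    case #n      fallThrough
    //       (inlined callee) (inlined callee) (inlined callee) (slow Call if couldTakeSlowPath,
    //                \            |               /             else CheckBadCell)
    //                 \           |              /              /
    //                          continuationBlock
    //
    // Tail-call cases terminate their blocks instead of jumping to the continuation.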
2139 template<typename ChecksFunctor>
2140 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2142 ASSERT(op == ArithMin || op == ArithMax);
2144 if (argumentCountIncludingThis == 1) {
2146 double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2147 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2151 if (argumentCountIncludingThis == 2) {
2153 Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2154 addToGraph(Phantom, Edge(resultNode, NumberUse));
2155 set(result, resultNode);
2159 if (argumentCountIncludingThis == 3) {
2161 set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2165 // Don't handle three or more (non-this) arguments for now.
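    // For illustration, with op == ArithMax this handles exactly:
    //     Math.max()     -> JSConstant(-Infinity)
    //     Math.max(x)    -> x itself, plus a Phantom(NumberUse) so we still speculate x is a number
    //     Math.max(x, y) -> ArithMax(x, y)
    // and returns false for anything longer, leaving it to the generic call path.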
2169 template<typename ChecksFunctor>
2170 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2172 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
2174 if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2177 // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2178 // it would only benefit intrinsics called as setters, like if you do:
2180 // o.__defineSetter__("foo", Math.pow)
2182 // Which is extremely amusing, but probably not worth optimizing.
2183 if (!result.isValid())
2186 bool didSetResult = false;
2187 auto setResult = [&] (Node* node) {
2188 RELEASE_ASSERT(!didSetResult);
2190 didSetResult = true;
2193 auto inlineIntrinsic = [&] {
2194 switch (intrinsic) {
2196 // Intrinsic Functions:
2198 case AbsIntrinsic: {
2199 if (argumentCountIncludingThis == 1) { // Math.abs()
2201 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2205 if (!MacroAssembler::supportsFloatingPointAbs())
2209 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2210 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2211 node->mergeFlags(NodeMayOverflowInt32InDFG);
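    // The overflow profile matters because the one int32 input whose absolute value does not fit
    // back into int32 is INT32_MIN: |-2^31| == 2^31 > INT32_MAX, so ArithAbs must be allowed to
    // produce a double (or exit) in that case.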
2218 if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2219 didSetResult = true;
2224 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2225 case capitalizedName##Intrinsic:
2226 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2227 #undef DFG_ARITH_UNARY
2229 if (argumentCountIncludingThis == 1) {
2231 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2234 Arith::UnaryType type = Arith::UnaryType::Sin;
2235 switch (intrinsic) {
2236 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2237 case capitalizedName##Intrinsic: \
2238 type = Arith::UnaryType::capitalizedName; \
2240 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2241 #undef DFG_ARITH_UNARY
2243 RELEASE_ASSERT_NOT_REACHED();
2246 setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2250 case FRoundIntrinsic:
2251 case SqrtIntrinsic: {
2252 if (argumentCountIncludingThis == 1) {
2254 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2258 NodeType nodeType = Unreachable;
2259 switch (intrinsic) {
2260 case FRoundIntrinsic:
2261 nodeType = ArithFRound;
2264 nodeType = ArithSqrt;
2267 RELEASE_ASSERT_NOT_REACHED();
2270 setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2274 case PowIntrinsic: {
2275 if (argumentCountIncludingThis < 3) {
2276 // Math.pow() and Math.pow(x) return NaN.
2278 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2282 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2283 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2284 setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2288 case ArrayPushIntrinsic: {
2289 #if USE(JSVALUE32_64)
2291 if (argumentCountIncludingThis > 2)
2296 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2299 ArrayMode arrayMode = getArrayMode(Array::Write);
2300 if (!arrayMode.isJSArray())
2302 switch (arrayMode.type()) {
2305 case Array::Contiguous:
2306 case Array::ArrayStorage: {
2309 addVarArgChild(nullptr); // For storage.
2310 for (int i = 0; i < argumentCountIncludingThis; ++i)
2311 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2312 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2313 setResult(arrayPush);
2322 case ArraySliceIntrinsic: {
2323 #if USE(JSVALUE32_64)
2325 // There aren't enough registers for this to be done easily.
2329 if (argumentCountIncludingThis < 1)
2332 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2333 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2336 ArrayMode arrayMode = getArrayMode(Array::Read);
2337 if (!arrayMode.isJSArray())
2340 if (!arrayMode.isJSArrayWithOriginalStructure())
2343 switch (arrayMode.type()) {
2346 case Array::Contiguous: {
2347 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2349 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2350 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2352 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2353 // https://bugs.webkit.org/show_bug.cgi?id=173171
2354 if (globalObject->arraySpeciesWatchpointSet().state() == IsWatched
2355 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2356 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2357 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2358 && globalObject->arrayPrototypeChainIsSane()) {
2360 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpointSet());
2361 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2362 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2363 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2367 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2368 // We do a few things here to prove that we aren't skipping side effects in an observable way:
2369 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2370 // effects of slice require that we perform a Get(array, "constructor") and we can skip
2371 // that if we're an original array structure.) (We can relax this in the future by using
2372 // TryGetById and CheckCell).
2374 // 2. We check that the array we're calling slice on has the same global object as the lexical
2375 // global object that this code is running in. This requirement is necessary because we set up the
2376 // watchpoints above on the lexical global object. This means that code that calls slice on
2377 // arrays produced by other global objects won't get this optimization. We could relax this
2378 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2379 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2381 // 3. By proving we're an original array structure, we guarantee that the incoming array
2382 // isn't a subclass of Array.
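    // For example, an instance of `class Derived extends Array {}` has a non-original structure,
    // and writing `arr.constructor = somethingElse` transitions the structure too, so the
    // CheckStructure below rejects both; tampering with Array[Symbol.species] itself is caught by
    // the arraySpeciesWatchpointSet registered above instead.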
2384 StructureSet structureSet;
2385 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2386 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2387 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2388 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2389 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2390 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2391 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2393 addVarArgChild(array);
2394 if (argumentCountIncludingThis >= 2)
2395 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2396 if (argumentCountIncludingThis >= 3)
2397 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2398 addVarArgChild(addToGraph(GetButterfly, array));
2400 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2401 setResult(arraySlice);
2411 RELEASE_ASSERT_NOT_REACHED();
2415 case ArrayIndexOfIntrinsic: {
2416 if (argumentCountIncludingThis < 2)
2419 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2420 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2421 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2422 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2425 ArrayMode arrayMode = getArrayMode(Array::Read);
2426 if (!arrayMode.isJSArray())
2429 if (!arrayMode.isJSArrayWithOriginalStructure())
2432 // We do not want to convert arrays into one type just to perform indexOf.
2433 if (arrayMode.doesConversion())
2436 switch (arrayMode.type()) {
2439 case Array::Contiguous: {
2440 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2442 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2443 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2445 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2446 // https://bugs.webkit.org/show_bug.cgi?id=173171
2447 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2448 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2449 && globalObject->arrayPrototypeChainIsSane()) {
2451 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2452 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2456 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2457 addVarArgChild(array);
2458 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2459 if (argumentCountIncludingThis >= 3)
2460 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2461 addVarArgChild(nullptr);
2463 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2474 RELEASE_ASSERT_NOT_REACHED();
2479 case ArrayPopIntrinsic: {
2480 if (argumentCountIncludingThis != 1)
2483 ArrayMode arrayMode = getArrayMode(Array::Write);
2484 if (!arrayMode.isJSArray())
2486 switch (arrayMode.type()) {
2489 case Array::Contiguous:
2490 case Array::ArrayStorage: {
2492 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2493 setResult(arrayPop);
2502 case AtomicsAddIntrinsic:
2503 case AtomicsAndIntrinsic:
2504 case AtomicsCompareExchangeIntrinsic:
2505 case AtomicsExchangeIntrinsic:
2506 case AtomicsIsLockFreeIntrinsic:
2507 case AtomicsLoadIntrinsic:
2508 case AtomicsOrIntrinsic:
2509 case AtomicsStoreIntrinsic:
2510 case AtomicsSubIntrinsic:
2511 case AtomicsXorIntrinsic: {
2515 NodeType op = LastNodeType;
2516 Array::Action action = Array::Write;
2517 unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2518 switch (intrinsic) {
2519 case AtomicsAddIntrinsic:
2523 case AtomicsAndIntrinsic:
2527 case AtomicsCompareExchangeIntrinsic:
2528 op = AtomicsCompareExchange;
2531 case AtomicsExchangeIntrinsic:
2532 op = AtomicsExchange;
2535 case AtomicsIsLockFreeIntrinsic:
2536 // This gets no backing store, but we need no special logic for this since this also does
2537 // not need varargs.
2538 op = AtomicsIsLockFree;
2541 case AtomicsLoadIntrinsic:
2544 action = Array::Read;
2546 case AtomicsOrIntrinsic:
2550 case AtomicsStoreIntrinsic:
2554 case AtomicsSubIntrinsic:
2558 case AtomicsXorIntrinsic:
2563 RELEASE_ASSERT_NOT_REACHED();
2567 if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2572 Vector<Node*, 3> args;
2573 for (unsigned i = 0; i < numArgs; ++i)
2574 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2577 if (numArgs + 1 <= 3) {
2578 while (args.size() < 3)
2579 args.append(nullptr);
2580 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2582 for (Node* node : args)
2583 addVarArgChild(node);
2584 addVarArgChild(nullptr);
2585 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2588 setResult(resultNode);
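    // For example, Atomics.add(typedArray, index, value) has numArgs == 3, so with the extra
    // backing-store child it needs four edges and takes the Node::VarArg path above, while
    // Atomics.load(typedArray, index) fits the fixed three-child form, with the padded null
    // child left for the backing store.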
2592 case ParseIntIntrinsic: {
2593 if (argumentCountIncludingThis < 2)
2596 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2600 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2602 if (argumentCountIncludingThis == 2)
2603 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2605 ASSERT(argumentCountIncludingThis > 2);
2606 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2607 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2609 setResult(parseInt);
2613 case CharCodeAtIntrinsic: {
2614 if (argumentCountIncludingThis != 2)
2618 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2619 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2620 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2622 setResult(charCode);
2626 case CharAtIntrinsic: {
2627 if (argumentCountIncludingThis != 2)
2631 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2632 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2633 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2635 setResult(charCode);
2638 case Clz32Intrinsic: {
2640 if (argumentCountIncludingThis == 1)
2641 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2643 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2644 setResult(addToGraph(ArithClz32, operand));
2648 case FromCharCodeIntrinsic: {
2649 if (argumentCountIncludingThis != 2)
2653 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2654 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2656 setResult(charCode);
2661 case RegExpExecIntrinsic: {
2662 if (argumentCountIncludingThis != 2)
2666 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2667 setResult(regExpExec);
2672 case RegExpTestIntrinsic:
2673 case RegExpTestFastIntrinsic: {
2674 if (argumentCountIncludingThis != 2)
2677 if (intrinsic == RegExpTestIntrinsic) {
2678 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2679 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2682 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2683 Structure* regExpStructure = globalObject->regExpStructure();
2684 m_graph.registerStructure(regExpStructure);
2685 ASSERT(regExpStructure->storedPrototype().isObject());
2686 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2688 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2689 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2691 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2692 JSValue currentProperty;
2693 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2696 return currentProperty == primordialProperty;
2699 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2700 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2703 // Check that regExpObject is actually a RegExp object.
2704 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2705 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2707 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2708 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2709 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2710 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2711 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2712 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
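    // So, for example, if RegExp.prototype.exec was already replaced, isRegExpPropertySame above
    // fails and we never inline; and if this particular regExpObject later grows its own `exec`
    // property, the TryGetById/CheckCell pair exits at run time instead of running the inlined
    // RegExpTest against the wrong exec.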
2716 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2717 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2718 setResult(regExpExec);
2723 case RegExpMatchFastIntrinsic: {
2724 RELEASE_ASSERT(argumentCountIncludingThis == 2);
2727 Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2728 setResult(regExpMatch);
2732 case ObjectCreateIntrinsic: {
2733 if (argumentCountIncludingThis != 2)
2737 setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2741 case ObjectGetPrototypeOfIntrinsic: {
2742 if (argumentCountIncludingThis != 2)
2746 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2750 case ObjectIsIntrinsic: {
2751 if (argumentCountIncludingThis < 3)
2755 setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2759 case ObjectKeysIntrinsic: {
2760 if (argumentCountIncludingThis < 2)
2764 setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2768 case ReflectGetPrototypeOfIntrinsic: {
2769 if (argumentCountIncludingThis != 2)
2773 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2777 case IsTypedArrayViewIntrinsic: {
2778 ASSERT(argumentCountIncludingThis == 2);
2781 setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2785 case StringPrototypeValueOfIntrinsic: {
2787 Node* value = get(virtualRegisterForArgument(0, registerOffset));
2788 setResult(addToGraph(StringValueOf, value));
2792 case StringPrototypeReplaceIntrinsic: {
2793 if (argumentCountIncludingThis != 3)
2796 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2797 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2800 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2801 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2804 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2805 Structure* regExpStructure = globalObject->regExpStructure();
2806 m_graph.registerStructure(regExpStructure);
2807 ASSERT(regExpStructure->storedPrototype().isObject());
2808 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2810 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2811 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2813 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2814 JSValue currentProperty;
2815 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2818 return currentProperty == primordialProperty;
2821 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2822 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2825 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2826 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2829 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2830 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2833 // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2834 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2839 Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2840 setResult(resultNode);
2844 case StringPrototypeReplaceRegExpIntrinsic: {
2845 if (argumentCountIncludingThis != 3)
2849 Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2850 setResult(resultNode);
2854 case RoundIntrinsic:
2855 case FloorIntrinsic:
2857 case TruncIntrinsic: {
2858 if (argumentCountIncludingThis == 1) {
2860 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2864 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2866 if (intrinsic == RoundIntrinsic)
2868 else if (intrinsic == FloorIntrinsic)
2870 else if (intrinsic == CeilIntrinsic)
2873 ASSERT(intrinsic == TruncIntrinsic);
2876 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2877 setResult(roundNode);
2880 case IMulIntrinsic: {
2881 if (argumentCountIncludingThis != 3)
2884 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2885 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2886 Node* left = get(leftOperand);
2887 Node* right = get(rightOperand);
2888 setResult(addToGraph(ArithIMul, left, right));
2892 case RandomIntrinsic: {
2893 if (argumentCountIncludingThis != 1)
2896 setResult(addToGraph(ArithRandom));
2900 case DFGTrueIntrinsic: {
2902 setResult(jsConstant(jsBoolean(true)));
2906 case FTLTrueIntrinsic: {
2908 setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2912 case OSRExitIntrinsic: {
2914 addToGraph(ForceOSRExit);
2915 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2919 case IsFinalTierIntrinsic: {
2921 setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2925 case SetInt32HeapPredictionIntrinsic: {
2927 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2928 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2929 if (node->hasHeapPrediction())
2930 node->setHeapPrediction(SpecInt32Only);
2932 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2936 case CheckInt32Intrinsic: {
2938 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2939 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2940 addToGraph(Phantom, Edge(node, Int32Use));
2942 setResult(jsConstant(jsBoolean(true)));
2946 case FiatInt52Intrinsic: {
2947 if (argumentCountIncludingThis != 2)
2950 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2952 setResult(addToGraph(FiatInt52, get(operand)));
2954 setResult(get(operand));
2958 case JSMapGetIntrinsic: {
2959 if (argumentCountIncludingThis != 2)
2963 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2964 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2965 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2966 Node* hash = addToGraph(MapHash, normalizedKey);
2967 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2968 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2969 setResult(resultNode);
2973 case JSSetHasIntrinsic:
2974 case JSMapHasIntrinsic: {
2975 if (argumentCountIncludingThis != 2)
2979 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2980 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2981 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2982 Node* hash = addToGraph(MapHash, normalizedKey);
2983 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2984 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2985 JSCell* sentinel = nullptr;
2986 if (intrinsic == JSMapHasIntrinsic)
2987 sentinel = m_vm->sentinelMapBucket();
2989 sentinel = m_vm->sentinelSetBucket();
2991 FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2992 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2993 Node* resultNode = addToGraph(LogicalNot, invertedResult);
2994 setResult(resultNode);
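    // In other words, has(key) compiles to "bucket(key) != sentinel": GetMapBucket returns the
    // VM's shared sentinel bucket when the key is absent, so CompareEqPtr against the frozen
    // sentinel followed by LogicalNot yields the boolean result without adding control flow.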
2998 case JSSetAddIntrinsic: {
2999 if (argumentCountIncludingThis != 2)
3003 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3004 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3005 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3006 Node* hash = addToGraph(MapHash, normalizedKey);
3007 addToGraph(SetAdd, base, normalizedKey, hash);
3012 case JSMapSetIntrinsic: {
3013 if (argumentCountIncludingThis != 3)
3017 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3018 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3019 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3021 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3022 Node* hash = addToGraph(MapHash, normalizedKey);
3024 addVarArgChild(base);
3025 addVarArgChild(normalizedKey);
3026 addVarArgChild(value);
3027 addVarArgChild(hash);
3028 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
3033 case JSSetBucketHeadIntrinsic:
3034 case JSMapBucketHeadIntrinsic: {
3035 ASSERT(argumentCountIncludingThis == 2);
3038 Node* map = get(virtualRegisterForArgument(1, registerOffset));
3039 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3040 Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3041 setResult(resultNode);
3045 case JSSetBucketNextIntrinsic:
3046 case JSMapBucketNextIntrinsic: {
3047 ASSERT(argumentCountIncludingThis == 2);
3050 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3051 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3052 Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3053 setResult(resultNode);
3057 case JSSetBucketKeyIntrinsic:
3058 case JSMapBucketKeyIntrinsic: {
3059 ASSERT(argumentCountIncludingThis == 2);
3062 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3063 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3064 Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3065 setResult(resultNode);
3069 case JSMapBucketValueIntrinsic: {
3070 ASSERT(argumentCountIncludingThis == 2);
3073 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3074 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3075 setResult(resultNode);
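// WeakMap.prototype.get: WeakMap keys must be objects, hence the ObjectUse check
// before hashing. WeakMapGet produces the holder, and ExtractValueFromWeakMapGet
// pulls out the stored value (yielding undefined when nothing was found).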
3079 case JSWeakMapGetIntrinsic: {
3080 if (argumentCountIncludingThis != 2)
3083 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3087 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3088 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3089 addToGraph(Check, Edge(key, ObjectUse));
3090 Node* hash = addToGraph(MapHash, key);
3091 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3092 Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3094 setResult(resultNode);
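// WeakMap.prototype.has / WeakSet.prototype.has: same WeakMapGet lookup as
// above, but the holder is only tested for emptiness (IsEmpty + LogicalNot).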
3098 case JSWeakMapHasIntrinsic: {
3099 if (argumentCountIncludingThis != 2)
3102 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3106 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3107 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3108 addToGraph(Check, Edge(key, ObjectUse));
3109 Node* hash = addToGraph(MapHash, key);
3110 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3111 Node* invertedResult = addToGraph(IsEmpty, holder);
3112 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3114 setResult(resultNode);
3118 case JSWeakSetHasIntrinsic: {
3119 if (argumentCountIncludingThis != 2)
3122 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3126 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3127 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3128 addToGraph(Check, Edge(key, ObjectUse));
3129 Node* hash = addToGraph(MapHash, key);
3130 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3131 Node* invertedResult = addToGraph(IsEmpty, holder);
3132 Node* resultNode = addToGraph(LogicalNot, invertedResult);
3134 setResult(resultNode);
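// WeakSet.prototype.add / WeakMap.prototype.set: after the same object-key check
// and MapHash, the mutation is a single WeakSetAdd node or a var-arg WeakMapSet
// node with (base, key, value, hash) children.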
3138 case JSWeakSetAddIntrinsic: {
3139 if (argumentCountIncludingThis != 2)
3142 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3146 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3147 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3148 addToGraph(Check, Edge(key, ObjectUse));
3149 Node* hash = addToGraph(MapHash, key);
3150 addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3155 case JSWeakMapSetIntrinsic: {
3156 if (argumentCountIncludingThis != 3)
3159 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3163 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3164 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3165 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3167 addToGraph(Check, Edge(key, ObjectUse));
3168 Node* hash = addToGraph(MapHash, key);
3170 addVarArgChild(Edge(base, WeakMapObjectUse));
3171 addVarArgChild(Edge(key, ObjectUse));
3172 addVarArgChild(Edge(value));
3173 addVarArgChild(Edge(hash, Int32Use));
3174 addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0));
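// DataView.prototype.getInt8 ... getFloat64 all lower to one of two nodes,
// DataViewGetInt or DataViewGetFloat; the switch below picks the node type and
// records the element byte size and signedness in a DataViewData payload.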
3179 case DataViewGetInt8:
3180 case DataViewGetUint8:
3181 case DataViewGetInt16:
3182 case DataViewGetUint16:
3183 case DataViewGetInt32:
3184 case DataViewGetUint32:
3185 case DataViewGetFloat32:
3186 case DataViewGetFloat64: {
3190 // To inline data view accesses, we assume the architecture we're running on:
3191 // - Is little endian.
3192 // - Allows unaligned loads/stores without crashing.
3194 if (argumentCountIncludingThis < 2)
3196 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3202 NodeType op = DataViewGetInt;
3203 bool isSigned = false;
3204 switch (intrinsic) {
3205 case DataViewGetInt8:
3208 case DataViewGetUint8:
3212 case DataViewGetInt16:
3215 case DataViewGetUint16:
3219 case DataViewGetInt32:
3222 case DataViewGetUint32:
3226 case DataViewGetFloat32:
3228 op = DataViewGetFloat;
3230 case DataViewGetFloat64:
3232 op = DataViewGetFloat;
3235 RELEASE_ASSERT_NOT_REACHED();
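// Endianness is resolved at compile time when the littleEndian argument is a
// foldable constant; in that case the extra child is dropped and the TriState is
// baked into the DataViewData instead.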
3238 TriState isLittleEndian = MixedTriState;
3239 Node* littleEndianChild = nullptr;
3241 if (argumentCountIncludingThis < 3)
3242 isLittleEndian = FalseTriState;
3244 littleEndianChild = get(virtualRegisterForArgument(2, registerOffset));
3245 if (littleEndianChild->hasConstant()) {
3246 JSValue constant = littleEndianChild->constant()->value();
3248 isLittleEndian = constant.pureToBoolean();
3249 if (isLittleEndian != MixedTriState)
3250 littleEndianChild = nullptr;
3253 isLittleEndian = MixedTriState;
3257 DataViewData data { };
3258 data.isLittleEndian = isLittleEndian;
3259 data.isSigned = isSigned;
3260 data.byteSize = byteSize;
3262 setResult(
3263 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild));
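// DataView.prototype.setInt8 ... setFloat64 mirror the get cases above, but
// since the value to store is an extra input, DataViewSet is built with var-arg
// children (view, index, value, and the optional littleEndian child).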
3267 case DataViewSetInt8:
3268 case DataViewSetUint8:
3269 case DataViewSetInt16:
3270 case DataViewSetUint16:
3271 case DataViewSetInt32:
3272 case DataViewSetUint32:
3273 case DataViewSetFloat32:
3274 case DataViewSetFloat64: {
3278 if (argumentCountIncludingThis < 3)
3281 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3287 bool isFloatingPoint = false;
3288 bool isSigned = false;
3289 switch (intrinsic) {
3290 case DataViewSetInt8:
3293 case DataViewSetUint8:
3297 case DataViewSetInt16:
3300 case DataViewSetUint16:
3304 case DataViewSetInt32:
3307 case DataViewSetUint32:
3311 case DataViewSetFloat32:
3312 isFloatingPoint = true;
3315 case DataViewSetFloat64:
3316 isFloatingPoint = true;
3320 RELEASE_ASSERT_NOT_REACHED();
3323 TriState isLittleEndian = MixedTriState;
3324 Node* littleEndianChild = nullptr;
3326 if (argumentCountIncludingThis < 4)
3327 isLittleEndian = FalseTriState;
3329 littleEndianChild = get(virtualRegisterForArgument(3, registerOffset));
3330 if (littleEndianChild->hasConstant()) {
3331 JSValue constant = littleEndianChild->constant()->value();
3333 isLittleEndian = constant.pureToBoolean();
3334 if (isLittleEndian != MixedTriState)
3335 littleEndianChild = nullptr;
3338 isLittleEndian = MixedTriState;
3342 DataViewData data { };
3343 data.isLittleEndian = isLittleEndian;
3344 data.isSigned = isSigned;
3345 data.byteSize = byteSize;
3346 data.isFloatingPoint = isFloatingPoint;
3348 addVarArgChild(get(virtualRegisterForArgument(0, registerOffset)));
3349 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset)));
3350 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset)));
3351 addVarArgChild(littleEndianChild);
3353 addToGraph(Node::VarArg, DataViewSet, OpInfo(data.asQuadWord), OpInfo());
3354 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
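// Object.prototype.hasOwnProperty: inlined as a HasOwnProperty node, which leans
// on the VM-wide HasOwnPropertyCache whose existence is checked below.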
3358 case HasOwnPropertyIntrinsic: {
3359 if (argumentCountIncludingThis != 2)
3362 // This can be racy; that's fine. We know that once we observe that this cache has been
3363 // created, it will never be destroyed until the VM is destroyed. It's unlikely that
3364 // we'd ever get to the point where we inline this as an intrinsic without the
3365 // cache being created; however, it's possible if we always throw exceptions inside
3366 // hasOwnProperty.
3367 if (!m_vm->hasOwnPropertyCache())
3371 Node* object = get(virtualRegisterForArgument(0, registerOffset));
3372 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3373 Node* resultNode = addToGraph(HasOwnProperty, object, key);
3374 setResult(resultNode);
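// String.prototype.slice: StringSlice takes an optional end child that stays
// null when slice() was called with only a start index.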
3378 case StringPrototypeSliceIntrinsic: {
3379 if (argumentCountIncludingThis < 2)
3382 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3386 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3387 Node* start = get(virtualRegisterForArgument(1, registerOffset));
3388 Node* end = nullptr;
3389 if (argumentCountIncludingThis > 2)
3390 end = get(virtualRegisterForArgument(2, registerOffset));
3391 Node* resultNode = addToGraph(StringSlice, thisString, start, end);
3392 setResult(resultNode);
3396 case StringPrototypeToLowerCaseIntrinsic: {
3397 if (argumentCountIncludingThis != 1)
3400 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3404 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3405 Node* resultNode = addToGraph(ToLowerCase, thisString);
3406 setResult(resultNode);
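// Number.prototype.toString: with no radix this is a plain ToString of the
// number; an explicit radix argument turns it into NumberToStringWithRadix.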
3410 case NumberPrototypeToStringIntrinsic: {
3411 if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
3414 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3418 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3419 if (argumentCountIncludingThis == 1) {
3420 Node* resultNode = addToGraph(ToString, thisNumber);
3421 setResult(resultNode);
3423 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3424 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3425 setResult(resultNode);
3430 case NumberIsIntegerIntrinsic: {
3431 if (argumentCountIncludingThis < 2)
3435 Node* input = get(virtualRegisterForArgument(1, registerOffset));
3436 Node* resultNode = addToGraph(NumberIsInteger, input);
3437 setResult(resultNode);
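// CPU probing intrinsics (mfence/rdtsc/cpuid/pause, presumably exposed through
// the $vm test object): these only make sense as FTL-level nodes, hence the
// isFTL() check below.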
3441 case CPUMfenceIntrinsic:
3442 case CPURdtscIntrinsic:
3443 case CPUCpuidIntrinsic:
3444 case CPUPauseIntrinsic: {
3446 if (!m_graph.m_plan.isFTL())
3449 setResult(addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3461 if (inlineIntrinsic()) {
3462 RELEASE_ASSERT(didSetResult);
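// handleDOMJITCall: inlines a call to a DOMJIT-annotated host function when the
// argument count matches the DOMJIT::Signature and this site has not previously
// exited on a bad type; the call is then emitted via addCall with the signature
// attached.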
3469 template<typename ChecksFunctor>
3470 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3472 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3474 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3477 // FIXME: Currently, we only support functions that take up to 2 arguments.
3478 // Eventually, we should extend this, though 2 or 3 arguments probably cover the typical use cases.
3479 // https://bugs.webkit.org/show_bug.cgi?id=164346
3480 ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
3483 addCall(result, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
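// handleIntrinsicGetter: invoked when a GetById resolves to a known intrinsic
// getter (typed array byteLength/length/byteOffset, __proto__, ...). Each case
// replaces the getter with dedicated DFG nodes under the checks emitted by the
// caller-supplied insertChecks functor.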
3488 template<typename ChecksFunctor>
3489 bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3491 switch (variant.intrinsic()) {
3492 case TypedArrayByteLengthIntrinsic: {
3495 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3496 Array::Type arrayType = toArrayType(type);
3497 size_t logSize = logElementSize(type);
3499 variant.structureSet().forEach([&] (Structure* structure) {
3500 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3501 ASSERT(logSize == logElementSize(curType));
3502 arrayType = refineTypedArrayType(arrayType, curType);
3503 ASSERT(arrayType != Array::Generic);
3506 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode);
3509 set(result, lengthNode);
3513 // We can use a BitLShift here because typed arrays will never have a byteLength
3514 // that overflows int32.
3515 Node* shiftNode = jsConstant(jsNumber(logSize));
3516 set(result, addToGraph(ArithBitLShift, lengthNode, shiftNode));
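// TypedArray length getter: once all structures in the variant refine to a
// single typed-array type, the getter becomes a GetArrayLength node.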
3521 case TypedArrayLengthIntrinsic: {
3524 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3525 Array::Type arrayType = toArrayType(type);
3527 variant.structureSet().forEach([&] (Structure* structure) {
3528 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3529 arrayType = refineTypedArrayType(arrayType, curType);
3530 ASSERT(arrayType != Array::Generic);
3533 set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
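// TypedArray byteOffset getter: same structure-set refinement, lowered to a
// GetTypedArrayByteOffset node.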
3539 case TypedArrayByteOffsetIntrinsic: {
3542 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3543 Array::Type arrayType = toArrayType(type);
3545 variant.structureSet().forEach([&] (Structure* structure) {
3546 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3547 arrayType = refineTypedArrayType(arrayType, curType);
3548 ASSERT(arrayType != Array::Generic);
3551 set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));
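// __proto__ getter: only considered foldable when every structure in the set
// uses the default JSObject::getPrototype and does not have a poly proto; the
// loop below checks both conditions before deciding whether to fold.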
3556 case UnderscoreProtoIntrinsic: {
3559 bool canFold = !variant.structureSet().isEmpty();
3561 variant.structureSet().forEach([&] (Structure* structure) {
3562 auto getPrototypeMethod = structure->classInfo()->methodTable.getPrototype;
3563 MethodTable::GetPrototypeFunctionPtr defaultGetPrototype = JSObject::getPrototype;
3564 if (getPrototypeMethod != defaultGetPrototype) {
3569 if (structure->hasPolyProto()) {