/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGByteCodeParser.h"

#include "ArithProfile.h"
#include "ArrayConstructor.h"
#include "BasicBlockLocation.h"
#include "BuiltinNames.h"
#include "BytecodeStructs.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "CommonSlowPaths.h"
#include "DFGAbstractHeap.h"
#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
#include "DFGClobberize.h"
#include "DFGClobbersExitState.h"
#include "DFGJITCode.h"
#include "FunctionCodeBlock.h"
#include "GetByIdStatus.h"
#include "InByIdStatus.h"
#include "InstanceOfStatus.h"
#include "JSCInlines.h"
#include "JSFixedArray.h"
#include "JSImmutableButterfly.h"
#include "JSModuleEnvironment.h"
#include "JSModuleNamespaceObject.h"
#include "NumberConstructor.h"
#include "ObjectConstructor.h"
#include "OpcodeInlines.h"
#include "PreciseJumpTargets.h"
#include "PutByIdFlags.h"
#include "PutByIdStatus.h"
#include "RegExpPrototype.h"
#include "StackAlignment.h"
#include "StringConstructor.h"
#include "StructureStubInfo.h"
#include "SymbolConstructor.h"

#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
#include <wtf/SetForScope.h>
#include <wtf/StdLibExtras.h>
namespace JSC { namespace DFG {

namespace DFGByteCodeParserInternal {
#ifdef NDEBUG
static const bool verbose = false;
#else
static const bool verbose = true;
#endif
} // namespace DFGByteCodeParserInternal

#define VERBOSE_LOG(...) do { \
    if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
        dataLog(__VA_ARGS__); \
} while (false)
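// A minimal usage sketch (mirroring how the macro is used throughout this file): the
// arguments are forwarded to dataLog(), so any dataLog-printable values can be mixed:
//
//     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");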
91 // === ByteCodeParser ===
93 // This class is used to compile the dataflow graph from a CodeBlock.
94 class ByteCodeParser {
96 ByteCodeParser(Graph& graph)
98 , m_codeBlock(graph.m_codeBlock)
99 , m_profiledBlock(graph.m_profiledBlock)
103 , m_constantUndefined(graph.freeze(jsUndefined()))
104 , m_constantNull(graph.freeze(jsNull()))
105 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
106 , m_constantOne(graph.freeze(jsNumber(1)))
107 , m_numArguments(m_codeBlock->numParameters())
108 , m_numLocals(m_codeBlock->numCalleeLocals())
109 , m_parameterSlots(0)
110 , m_numPassedVarArgs(0)
111 , m_inlineStackTop(0)
112 , m_currentInstruction(0)
113 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
115 ASSERT(m_profiledBlock);
118 // Parse a full CodeBlock of bytecode.
122 struct InlineStackEntry;
124 // Just parse from m_currentIndex to the end of the current CodeBlock.
125 void parseCodeBlock();
127 void ensureLocals(unsigned newNumLocals)
129 VERBOSE_LOG(" ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
130 if (newNumLocals <= m_numLocals)
132 m_numLocals = newNumLocals;
133 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
134 m_graph.block(i)->ensureLocals(newNumLocals);
137 // Helper for min and max.
138 template<typename ChecksFunctor>
139 bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
141 void refineStatically(CallLinkStatus&, Node* callTarget);
142 // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
143 // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
144 // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
145 // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
146 // than to move the right index all the way to the treatment of op_ret.
147 BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
148 BasicBlock* allocateUntargetableBlock();
149 // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
150 void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
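    // Illustrative sketch (assumed usage pattern, not code from this file): polyvariant
    // inlining typically allocates an untargetable block for the continuation and only
    // makes it targetable once the bytecode index it resumes at is known:
    //
    //     BasicBlock* continuation = allocateUntargetableBlock();
    //     ... emit the inlined control flow that jumps to `continuation` ...
    //     makeBlockTargetable(continuation, nextOffset); // allowed at most once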
151 void addJumpTo(BasicBlock*);
152 void addJumpTo(unsigned bytecodeIndex);
153 // Handle calls. This resolves issues surrounding inlining and intrinsics.
154 enum Terminality { Terminal, NonTerminal };
155 Terminality handleCall(
156 VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
157 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
158 SpeculatedType prediction);
159 template<typename CallOp>
160 Terminality handleCall(const Instruction* pc, NodeType op, CallMode);
161 template<typename CallOp>
162 Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode);
void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
164 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
165 Node* getArgumentCount();
166 template<typename ChecksFunctor>
167 bool handleRecursiveTailCall(Node* callTargetNode, CallVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded);
168 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
169 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
170 bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind);
171 unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
172 enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
173 CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
174 CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
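    // Illustrative sketch (an assumption about how these numbers interact, not code from
    // this file): a variant is only inlined while its cost fits into the remaining balance:
    //
    //     unsigned cost = inliningCost(callee, argumentCountIncludingThis, kind);
    //     if (cost == UINT_MAX || cost > inliningBalance)
    //         return CallOptimizationResult::DidNothing; // plant a regular call instead
    //     inliningBalance -= cost;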
175 template<typename ChecksFunctor>
176 void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
177 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
178 template<typename ChecksFunctor>
179 bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
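    // Illustrative sketch (assumed caller shape; `callVariant` and `thisArgument` are
    // stand-in names): the ChecksFunctor lets the call site plant whatever speculation
    // checks must dominate the intrinsic body before it is emitted:
    //
    //     if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset,
    //             argumentCountIncludingThis, prediction,
    //             [&] () { emitFunctionChecks(callVariant, callTargetNode, thisArgument); }))
    //         return; // the intrinsic was emitted inline; no call node is needed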
180 template<typename ChecksFunctor>
181 bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
182 template<typename ChecksFunctor>
183 bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
184 template<typename ChecksFunctor>
185 bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
186 template<typename ChecksFunctor>
187 bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
188 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
189 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType = GetByOffset);
190 bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
191 bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus);
193 template<typename Bytecode>
194 void handlePutByVal(Bytecode, unsigned instructionSize);
195 template <typename Bytecode>
196 void handlePutAccessorById(NodeType, Bytecode);
197 template <typename Bytecode>
198 void handlePutAccessorByVal(NodeType, Bytecode);
199 template <typename Bytecode>
200 void handleNewFunc(NodeType, Bytecode);
201 template <typename Bytecode>
202 void handleNewFuncExp(NodeType, Bytecode);
204 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
205 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
206 ObjectPropertyCondition presenceLike(
207 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
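    // Illustrative sketch (hypothetical values, not code from this file):
    //
    //     ObjectPropertyCondition condition = presenceLike(knownBase, uid, offset, structureSet);
    //     if (!condition)
    //         ... the structures disagreed about the property; give up on this path ...
    //     if (!check(condition))
    //         ... the condition could not be guaranteed either ...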
209 // Attempt to watch the presence of a property. It will watch that the property is present in the same
210 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
211 // Returns true if this all works out.
212 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
213 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
215 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
216 template<typename VariantType>
217 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
219 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
221 template<typename Op>
222 void parseGetById(const Instruction*);
224 VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
226 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
bool isDirect, unsigned instructionSize);
231 // Either register a watchpoint or emit a check for this condition. Returns false if the
232 // condition no longer holds, and therefore no reasonable check can be emitted.
233 bool check(const ObjectPropertyCondition&);
235 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
237 // Either register a watchpoint or emit a check for this condition. It must be a Presence
238 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
239 // Emits code for the loaded value that the condition guards, and returns a node containing
240 // the loaded value. Returns null if the condition no longer holds.
241 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
242 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
243 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
245 // Calls check() for each condition in the set: that is, it either emits checks or registers
246 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
247 // conditions are no longer checkable, returns false.
248 bool check(const ObjectPropertyConditionSet&);
250 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
251 // base. Does a combination of watchpoint registration and check emission to guard the
252 // conditions, and emits code to load the value from the slot base. Returns a node containing
253 // the loaded value. Returns null if any of the conditions were no longer checkable.
254 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
255 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
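    // Illustrative sketch (assumed shape of a caller): guard the non-base conditions, then
    // load through the slot base, abandoning the variant if the plan comes back empty:
    //
    //     GetByOffsetMethod method = planLoad(conditionSet);
    //     if (!method)
    //         ... this variant is no longer checkable; fall back ...
    //     Node* loadedValue = load(prediction, identifierNumber, method);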
257 void prepareToParseBlock();
260 // Parse a single basic block of bytecode instructions.
261 void parseBlock(unsigned limit);
262 // Link block successors.
263 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
264 void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
266 VariableAccessData* newVariableAccessData(VirtualRegister operand)
268 ASSERT(!operand.isConstant());
270 m_graph.m_variableAccessData.append(operand);
271 return &m_graph.m_variableAccessData.last();
274 // Get/Set the operands/result of a bytecode instruction.
275 Node* getDirect(VirtualRegister operand)
277 ASSERT(!operand.isConstant());
279 // Is this an argument?
280 if (operand.isArgument())
281 return getArgument(operand);
284 return getLocal(operand);
287 Node* get(VirtualRegister operand)
289 if (operand.isConstant()) {
290 unsigned constantIndex = operand.toConstantIndex();
291 unsigned oldSize = m_constants.size();
292 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
293 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
294 JSValue value = codeBlock.getConstant(operand.offset());
295 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
296 if (constantIndex >= oldSize) {
297 m_constants.grow(constantIndex + 1);
298 for (unsigned i = oldSize; i < m_constants.size(); ++i)
299 m_constants[i] = nullptr;
302 Node* constantNode = nullptr;
303 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
304 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
306 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
307 m_constants[constantIndex] = constantNode;
309 ASSERT(m_constants[constantIndex]);
310 return m_constants[constantIndex];
313 if (inlineCallFrame()) {
314 if (!inlineCallFrame()->isClosureCall) {
315 JSFunction* callee = inlineCallFrame()->calleeConstant();
316 if (operand.offset() == CallFrameSlot::callee)
317 return weakJSConstant(callee);
319 } else if (operand.offset() == CallFrameSlot::callee) {
320 // We have to do some constant-folding here because this enables CreateThis folding. Note
321 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
322 // case if the function is a singleton then we already know it.
323 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
324 if (JSFunction* function = executable->singleton().inferredValue()) {
325 m_graph.watchpoints().addLazily(executable);
326 return weakJSConstant(function);
329 return addToGraph(GetCallee);
332 return getDirect(m_inlineStackTop->remapOperand(operand));
336 // A normal set which follows a two-phase commit that spans code origins. During
337 // the current code origin it issues a MovHint, and at the start of the next
338 // code origin there will be a SetLocal. If the local needs flushing, the second
339 // SetLocal will be preceded with a Flush.
342 // A set where the SetLocal happens immediately and there is still a Flush. This
343 // is relevant when assigning to a local in tricky situations for the delayed
344 // SetLocal logic but where we know that we have not performed any side effects
345 // within this code origin. This is a safe replacement for NormalSet anytime we
346 // know that we have not yet performed side effects in this code origin.
347 ImmediateSetWithFlush,
349 // A set where the SetLocal happens immediately and we do not Flush it even if
350 // this is a local that is marked as needing it. This is relevant when
351 // initializing locals at the top of a function.
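    // Illustrative sketch (an assumption about typical call sites, not code from this file):
    //
    //     set(bytecode.m_dst, value);                           // NormalSet: MovHint now, queued SetLocal
    //     set(bytecode.m_dst, value, ImmediateSetWithFlush);    // SetLocal now, Flush still honoured
    //     set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet); // SetLocal now, no Flush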
354 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
356 addToGraph(MovHint, OpInfo(operand.offset()), value);
358 // We can't exit anymore because our OSR exit state has changed.
361 DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
363 if (setMode == NormalSet) {
364 m_setLocalQueue.append(delayed);
368 return delayed.execute(this);
371 void processSetLocalQueue()
373 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
374 m_setLocalQueue[i].execute(this);
375 m_setLocalQueue.shrink(0);
378 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
380 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
383 Node* injectLazyOperandSpeculation(Node* node)
385 ASSERT(node->op() == GetLocal);
386 ASSERT(node->origin.semantic.bytecodeIndex() == m_currentIndex);
387 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
388 LazyOperandValueProfileKey key(m_currentIndex, node->local());
389 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
390 node->variableAccessData()->predict(prediction);
394 // Used in implementing get/set, above, where the operand is a local variable.
395 Node* getLocal(VirtualRegister operand)
397 unsigned local = operand.toLocal();
399 Node* node = m_currentBlock->variablesAtTail.local(local);
// This has two goals: 1) link together variable access datas, and 2)
// try to avoid creating redundant GetLocals. (1) is required for
// correctness - no other phase will ensure that block-local variable
// access data unification is done correctly. (2) is purely opportunistic
// and is meant as a compile-time optimization only.
407 VariableAccessData* variable;
410 variable = node->variableAccessData();
412 switch (node->op()) {
416 return node->child1().node();
421 variable = newVariableAccessData(operand);
423 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
424 m_currentBlock->variablesAtTail.local(local) = node;
427 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
429 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
431 unsigned local = operand.toLocal();
433 if (setMode != ImmediateNakedSet) {
434 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
435 if (argumentPosition)
436 flushDirect(operand, argumentPosition);
437 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
441 VariableAccessData* variableAccessData = newVariableAccessData(operand);
442 variableAccessData->mergeStructureCheckHoistingFailed(
443 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
444 variableAccessData->mergeCheckArrayHoistingFailed(
445 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
446 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
447 m_currentBlock->variablesAtTail.local(local) = node;
451 // Used in implementing get/set, above, where the operand is an argument.
452 Node* getArgument(VirtualRegister operand)
454 unsigned argument = operand.toArgument();
455 ASSERT(argument < m_numArguments);
457 Node* node = m_currentBlock->variablesAtTail.argument(argument);
459 VariableAccessData* variable;
462 variable = node->variableAccessData();
464 switch (node->op()) {
468 return node->child1().node();
473 variable = newVariableAccessData(operand);
475 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
476 m_currentBlock->variablesAtTail.argument(argument) = node;
479 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
481 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
483 unsigned argument = operand.toArgument();
484 ASSERT(argument < m_numArguments);
486 VariableAccessData* variableAccessData = newVariableAccessData(operand);
488 // Always flush arguments, except for 'this'. If 'this' is created by us,
489 // then make sure that it's never unboxed.
490 if (argument || m_graph.needsFlushedThis()) {
491 if (setMode != ImmediateNakedSet)
492 flushDirect(operand);
495 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
496 variableAccessData->mergeShouldNeverUnbox(true);
498 variableAccessData->mergeStructureCheckHoistingFailed(
499 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadCache));
500 variableAccessData->mergeCheckArrayHoistingFailed(
501 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex(), BadIndexingType));
502 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
503 m_currentBlock->variablesAtTail.argument(argument) = node;
507 ArgumentPosition* findArgumentPositionForArgument(int argument)
509 InlineStackEntry* stack = m_inlineStackTop;
510 while (stack->m_inlineCallFrame)
511 stack = stack->m_caller;
512 return stack->m_argumentPositions[argument];
515 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
517 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
518 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
519 if (!inlineCallFrame)
521 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
523 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
525 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
526 return stack->m_argumentPositions[argument];
531 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
533 if (operand.isArgument())
534 return findArgumentPositionForArgument(operand.toArgument());
535 return findArgumentPositionForLocal(operand);
538 template<typename AddFlushDirectFunc>
539 void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
542 if (inlineCallFrame) {
543 ASSERT(!m_graph.hasDebuggerEnabled());
544 numArguments = inlineCallFrame->argumentsWithFixup.size();
545 if (inlineCallFrame->isClosureCall)
546 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
547 if (inlineCallFrame->isVarargs())
548 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
550 numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
552 for (unsigned argument = numArguments; argument--;)
553 addFlushDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
555 if (m_graph.needsScopeRegister())
556 addFlushDirect(nullptr, m_graph.m_codeBlock->scopeRegister());
559 template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
560 void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
562 origin.walkUpInlineStack(
563 [&] (CodeOrigin origin) {
564 unsigned bytecodeIndex = origin.bytecodeIndex();
565 InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
566 flushImpl(inlineCallFrame, addFlushDirect);
568 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
569 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
570 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
572 for (unsigned local = codeBlock->numCalleeLocals(); local--;) {
573 if (livenessAtBytecode[local])
574 addPhantomLocalDirect(inlineCallFrame, remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
579 void flush(VirtualRegister operand)
581 flushDirect(m_inlineStackTop->remapOperand(operand));
584 void flushDirect(VirtualRegister operand)
586 flushDirect(operand, findArgumentPosition(operand));
589 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
591 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
594 template<NodeType nodeType>
595 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
597 ASSERT(!operand.isConstant());
599 Node* node = m_currentBlock->variablesAtTail.operand(operand);
601 VariableAccessData* variable;
604 variable = node->variableAccessData();
606 variable = newVariableAccessData(operand);
608 node = addToGraph(nodeType, OpInfo(variable));
609 m_currentBlock->variablesAtTail.operand(operand) = node;
610 if (argumentPosition)
611 argumentPosition->addVariable(variable);
614 void phantomLocalDirect(VirtualRegister operand)
616 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
619 void flush(InlineStackEntry* inlineStackEntry)
621 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
622 flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
625 void flushForTerminal()
627 auto addFlushDirect = [&] (InlineCallFrame*, VirtualRegister reg) { flushDirect(reg); };
628 auto addPhantomLocalDirect = [&] (InlineCallFrame*, VirtualRegister reg) { phantomLocalDirect(reg); };
629 flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
632 void flushForReturn()
634 flush(m_inlineStackTop);
637 void flushIfTerminal(SwitchData& data)
639 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
642 for (unsigned i = data.cases.size(); i--;) {
643 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
650 // Assumes that the constant should be strongly marked.
651 Node* jsConstant(JSValue constantValue)
653 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
656 Node* weakJSConstant(JSValue constantValue)
658 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
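    // A brief contrast, with an illustrative sketch (`inferredFunction` is a hypothetical
    // stand-in): jsConstant() freezes strongly, pinning the value for the lifetime of the
    // graph, while weakJSConstant() only freezes weakly, so the referenced cell may rely on
    // other machinery (e.g. watchpoints registered by the caller) to remain valid:
    //
    //     Node* one = jsConstant(jsNumber(1));            // strongly marked
    //     Node* func = weakJSConstant(inferredFunction);  // weak reference to a cell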
661 // Helper functions to get/set the this value.
664 return get(m_inlineStackTop->m_codeBlock->thisRegister());
667 void setThis(Node* value)
669 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
672 InlineCallFrame* inlineCallFrame()
674 return m_inlineStackTop->m_inlineCallFrame;
677 bool allInlineFramesAreTailCalls()
679 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
682 CodeOrigin currentCodeOrigin()
684 return CodeOrigin(m_currentIndex, inlineCallFrame());
687 NodeOrigin currentNodeOrigin()
692 if (m_currentSemanticOrigin.isSet())
693 semantic = m_currentSemanticOrigin;
695 semantic = currentCodeOrigin();
697 forExit = currentCodeOrigin();
699 return NodeOrigin(semantic, forExit, m_exitOK);
702 BranchData* branchData(unsigned taken, unsigned notTaken)
704 // We assume that branches originating from bytecode always have a fall-through. We
705 // use this assumption to avoid checking for the creation of terminal blocks.
706 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
707 BranchData* data = m_graph.m_branchData.add();
708 *data = BranchData::withBytecodeIndices(taken, notTaken);
712 Node* addToGraph(Node* node)
714 VERBOSE_LOG(" appended ", node, " ", Graph::opName(node->op()), "\n");
716 m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
718 m_currentBlock->append(node);
719 if (clobbersExitState(m_graph, node))
724 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
726 Node* result = m_graph.addNode(
727 op, currentNodeOrigin(), Edge(child1), Edge(child2),
729 return addToGraph(result);
731 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
733 Node* result = m_graph.addNode(
734 op, currentNodeOrigin(), child1, child2, child3);
735 return addToGraph(result);
737 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
739 Node* result = m_graph.addNode(
740 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
742 return addToGraph(result);
744 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
746 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
747 return addToGraph(result);
749 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
751 Node* result = m_graph.addNode(
752 op, currentNodeOrigin(), info1, info2,
753 Edge(child1), Edge(child2), Edge(child3));
754 return addToGraph(result);
756 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
758 Node* result = m_graph.addNode(
759 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
760 return addToGraph(result);
763 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
765 Node* result = m_graph.addNode(
766 Node::VarArg, op, currentNodeOrigin(), info1, info2,
767 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
770 m_numPassedVarArgs = 0;
775 void addVarArgChild(Node* child)
777 m_graph.m_varArgChildren.append(Edge(child));
778 m_numPassedVarArgs++;
781 void addVarArgChild(Edge child)
783 m_graph.m_varArgChildren.append(child);
784 m_numPassedVarArgs++;
787 Node* addCallWithoutSettingResult(
788 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
791 addVarArgChild(callee);
792 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
794 if (parameterSlots > m_parameterSlots)
795 m_parameterSlots = parameterSlots;
797 for (int i = 0; i < argCount; ++i)
798 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
800 return addToGraph(Node::VarArg, op, opInfo, prediction);
804 VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
805 SpeculatedType prediction)
807 if (op == TailCall) {
808 if (allInlineFramesAreTailCalls())
809 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
810 op = TailCallInlinedCaller;
814 Node* call = addCallWithoutSettingResult(
815 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
816 if (result.isValid())
821 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
// FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
// this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
// object's structure as soon as we make it a weakJSConstant.
826 Node* objectNode = weakJSConstant(object);
827 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
831 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
833 auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
835 SpeculatedType prediction;
837 ConcurrentJSLocker locker(codeBlock->m_lock);
838 prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
840 auto* fuzzerAgent = m_vm->fuzzerAgent();
841 if (UNLIKELY(fuzzerAgent))
842 return fuzzerAgent->getPrediction(codeBlock, codeOrigin, prediction) & SpecBytecodeTop;
846 SpeculatedType prediction = getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(m_inlineStackTop->m_profiledBlock, CodeOrigin(bytecodeIndex, inlineCallFrame()));
847 if (prediction != SpecNone)
850 // If we have no information about the values this
851 // node generates, we check if by any chance it is
852 // a tail call opcode. In that case, we walk up the
853 // inline frames to find a call higher in the call
854 // chain and use its prediction. If we only have
855 // inlined tail call frames, we use SpecFullTop
856 // to avoid a spurious OSR exit.
857 auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
858 OpcodeID opcodeID = instruction->opcodeID();
862 case op_tail_call_varargs:
863 case op_tail_call_forward_arguments: {
// Ideally things would be permissive enough for us to return BOTTOM instead of TOP here.
// Currently, returning BOTTOM will cause us to Force OSR exit. Returning TOP is bad because
// anything that transitively touches this speculated type also becomes TOP during
// prediction propagation.
// https://bugs.webkit.org/show_bug.cgi?id=164337
869 if (!inlineCallFrame())
872 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
876 InlineStackEntry* stack = m_inlineStackTop;
877 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame())
878 stack = stack->m_caller;
880 return getValueProfilePredictionFromForCodeBlockAndBytecodeOffset(stack->m_profiledBlock, *codeOrigin);
887 RELEASE_ASSERT_NOT_REACHED();
891 SpeculatedType getPrediction(unsigned bytecodeIndex)
893 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
895 if (prediction == SpecNone) {
896 // We have no information about what values this node generates. Give up
897 // on executing this code, since we're likely to do more damage than good.
898 addToGraph(ForceOSRExit);
904 SpeculatedType getPredictionWithoutOSRExit()
906 return getPredictionWithoutOSRExit(m_currentIndex);
909 SpeculatedType getPrediction()
911 return getPrediction(m_currentIndex);
914 ArrayMode getArrayMode(Array::Action action)
916 CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
917 ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
918 return getArrayMode(*profile, action);
921 ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action)
923 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
924 profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
925 bool makeSafe = profile.outOfBounds(locker);
926 return ArrayMode::fromObserved(locker, &profile, action, makeSafe);
929 Node* makeSafe(Node* node)
931 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
932 node->mergeFlags(NodeMayOverflowInt32InDFG);
933 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
934 node->mergeFlags(NodeMayNegZeroInDFG);
936 if (!isX86() && (node->op() == ArithMod || node->op() == ValueMod))
940 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
942 switch (node->op()) {
946 if (arithProfile->didObserveDouble())
947 node->mergeFlags(NodeMayHaveDoubleResult);
948 if (arithProfile->didObserveNonNumeric())
949 node->mergeFlags(NodeMayHaveNonNumericResult);
950 if (arithProfile->didObserveBigInt())
951 node->mergeFlags(NodeMayHaveBigIntResult);
956 if (arithProfile->didObserveInt52Overflow())
957 node->mergeFlags(NodeMayOverflowInt52);
958 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
959 node->mergeFlags(NodeMayOverflowInt32InBaseline);
960 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
961 node->mergeFlags(NodeMayNegZeroInBaseline);
962 if (arithProfile->didObserveDouble())
963 node->mergeFlags(NodeMayHaveDoubleResult);
964 if (arithProfile->didObserveNonNumeric())
965 node->mergeFlags(NodeMayHaveNonNumericResult);
966 if (arithProfile->didObserveBigInt())
967 node->mergeFlags(NodeMayHaveBigIntResult);
972 if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
973 node->mergeFlags(NodeMayHaveDoubleResult);
974 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
975 node->mergeFlags(NodeMayNegZeroInBaseline);
976 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
977 node->mergeFlags(NodeMayOverflowInt32InBaseline);
978 if (arithProfile->didObserveNonNumeric())
979 node->mergeFlags(NodeMayHaveNonNumericResult);
980 if (arithProfile->didObserveBigInt())
981 node->mergeFlags(NodeMayHaveBigIntResult);
991 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
992 switch (node->op()) {
998 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
999 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1010 Node* makeDivSafe(Node* node)
1012 ASSERT(node->op() == ArithDiv || node->op() == ValueDiv);
1014 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1015 node->mergeFlags(NodeMayOverflowInt32InDFG);
1016 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1017 node->mergeFlags(NodeMayNegZeroInDFG);
1019 // The main slow case counter for op_div in the old JIT counts only when
1020 // the operands are not numbers. We don't care about that since we already
1021 // have speculations in place that take care of that separately. We only
1022 // care about when the outcome of the division is not an integer, which
1023 // is what the special fast case counter tells us.
1025 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1028 // FIXME: It might be possible to make this more granular.
1029 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1031 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
1032 if (arithProfile->didObserveBigInt())
1033 node->mergeFlags(NodeMayHaveBigIntResult);
1038 void noticeArgumentsUse()
// All of the arguments in this function need to be formatted as JSValues because we will
// load from them in a random-access fashion and we don't want to have to switch on
// their format.
1044 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1045 argument->mergeShouldNeverUnbox(true);
1048 bool needsDynamicLookup(ResolveType, OpcodeID);
1051 CodeBlock* m_codeBlock;
1052 CodeBlock* m_profiledBlock;
1055 // The current block being generated.
1056 BasicBlock* m_currentBlock;
1057 // The bytecode index of the current instruction being generated.
1058 unsigned m_currentIndex;
1059 // The semantic origin of the current node if different from the current Index.
1060 CodeOrigin m_currentSemanticOrigin;
1061 // True if it's OK to OSR exit right now.
1062 bool m_exitOK { false };
1064 FrozenValue* m_constantUndefined;
1065 FrozenValue* m_constantNull;
1066 FrozenValue* m_constantNaN;
1067 FrozenValue* m_constantOne;
1068 Vector<Node*, 16> m_constants;
1070 HashMap<InlineCallFrame*, Vector<ArgumentPosition*>, WTF::DefaultHash<InlineCallFrame*>::Hash, WTF::NullableHashTraits<InlineCallFrame*>> m_inlineCallFrameToArgumentPositions;
1072 // The number of arguments passed to the function.
1073 unsigned m_numArguments;
1074 // The number of locals (vars + temporaries) used in the function.
1075 unsigned m_numLocals;
1076 // The number of slots (in units of sizeof(Register)) that we need to
1077 // preallocate for arguments to outgoing calls from this frame. This
1078 // number includes the CallFrame slots that we initialize for the callee
1079 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1080 // This number is 0 if and only if this function is a leaf.
1081 unsigned m_parameterSlots;
1082 // The number of var args passed to the next var arg node.
1083 unsigned m_numPassedVarArgs;
1085 struct InlineStackEntry {
1086 ByteCodeParser* m_byteCodeParser;
1088 CodeBlock* m_codeBlock;
1089 CodeBlock* m_profiledBlock;
1090 InlineCallFrame* m_inlineCallFrame;
1092 ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
1094 QueryableExitProfile m_exitProfile;
// Remapping of identifier and constant numbers from the code block being
// inlined (inline callee) to the code block that we're inlining into
// (the machine code block, which is the transitive, though not necessarily
// direct, caller).
1100 Vector<unsigned> m_identifierRemap;
1101 Vector<unsigned> m_switchRemap;
1103 // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1104 // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1105 Vector<BasicBlock*> m_unlinkedBlocks;
1107 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1108 // cannot have two blocks that have the same bytecodeBegin.
1109 Vector<BasicBlock*> m_blockLinkingTargets;
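        // Illustrative sketch (grounded in how handleRecursiveTailCall resolves a target
        // below): because this vector is sorted and duplicate-free, a bytecode index can be
        // resolved to a block with a binary search:
        //
        //     BasicBlock** blockPtr = tryBinarySearch<BasicBlock*, unsigned>(
        //         m_blockLinkingTargets, m_blockLinkingTargets.size(), bytecodeIndex,
        //         getBytecodeBeginForBlock);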
// Optional: a continuation block for returns to jump to; early returns create it on demand if it does not already exist.
1112 BasicBlock* m_continuationBlock;
1114 VirtualRegister m_returnValue;
1116 // Speculations about variable types collected from the profiled code block,
1117 // which are based on OSR exit profiles that past DFG compilations of this
1118 // code block had gathered.
1119 LazyOperandValueProfileParser m_lazyOperands;
1121 ICStatusMap m_baselineMap;
1122 ICStatusContext m_optimizedContext;
1124 // Pointers to the argument position trackers for this slice of code.
1125 Vector<ArgumentPosition*> m_argumentPositions;
1127 InlineStackEntry* m_caller;
1132 CodeBlock* profiledBlock,
1133 JSFunction* callee, // Null if this is a closure call.
1134 VirtualRegister returnValueVR,
1135 VirtualRegister inlineCallFrameStart,
1136 int argumentCountIncludingThis,
1137 InlineCallFrame::Kind,
1138 BasicBlock* continuationBlock);
1140 ~InlineStackEntry();
1142 VirtualRegister remapOperand(VirtualRegister operand) const
1144 if (!m_inlineCallFrame)
1147 ASSERT(!operand.isConstant());
1149 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1153 InlineStackEntry* m_inlineStackTop;
1155 ICStatusContextStack m_icContextStack;
1157 struct DelayedSetLocal {
1158 CodeOrigin m_origin;
1159 VirtualRegister m_operand;
1163 DelayedSetLocal() { }
1164 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1166 , m_operand(operand)
1168 , m_setMode(setMode)
1170 RELEASE_ASSERT(operand.isValid());
1173 Node* execute(ByteCodeParser* parser)
1175 if (m_operand.isArgument())
1176 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1177 return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1181 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1183 const Instruction* m_currentInstruction;
1184 bool m_hasDebuggerEnabled;
1185 bool m_hasAnyForceOSRExits { false };
1188 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1190 ASSERT(bytecodeIndex != UINT_MAX);
1191 Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1192 BasicBlock* blockPtr = block.ptr();
1193 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1194 if (m_inlineStackTop->m_blockLinkingTargets.size())
1195 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1196 m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1197 m_graph.appendBlock(WTFMove(block));
1201 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1203 Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1204 BasicBlock* blockPtr = block.ptr();
1205 m_graph.appendBlock(WTFMove(block));
1209 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1211 RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
1212 block->bytecodeBegin = bytecodeIndex;
1213 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1214 if (m_inlineStackTop->m_blockLinkingTargets.size())
1215 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1216 m_inlineStackTop->m_blockLinkingTargets.append(block);
1219 void ByteCodeParser::addJumpTo(BasicBlock* block)
1221 ASSERT(!m_currentBlock->terminal());
1222 Node* jumpNode = addToGraph(Jump);
1223 jumpNode->targetBlock() = block;
1224 m_currentBlock->didLink();
1227 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1229 ASSERT(!m_currentBlock->terminal());
1230 addToGraph(Jump, OpInfo(bytecodeIndex));
1231 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1234 template<typename CallOp>
1235 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode)
1237 auto bytecode = pc->as<CallOp>();
1238 Node* callTarget = get(bytecode.m_callee);
1239 int registerOffset = -static_cast<int>(bytecode.m_argv);
1241 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1242 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1243 m_inlineStackTop->m_baselineMap, m_icContextStack);
1245 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1247 return handleCall(bytecode.m_dst, op, kind, pc->size(), callTarget,
1248 bytecode.m_argc, registerOffset, callLinkStatus, getPrediction());
1251 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1253 if (callTarget->isCellConstant())
1254 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1257 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1258 VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1259 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1260 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1262 ASSERT(registerOffset <= 0);
1264 refineStatically(callLinkStatus, callTarget);
1266 VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1268 // If we have profiling information about this call, and it did not behave too polymorphically,
1269 // we may be able to inline it, or in the case of recursive tail calls turn it into a jump.
1270 if (callLinkStatus.canOptimize()) {
1271 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1273 VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1274 auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
1275 argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
1276 if (optimizationResult == CallOptimizationResult::OptimizedToJump)
1278 if (optimizationResult == CallOptimizationResult::Inlined) {
1279 if (UNLIKELY(m_graph.compilation()))
1280 m_graph.compilation()->noticeInlinedCall();
1285 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1286 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1287 return callNode->op() == TailCall ? Terminal : NonTerminal;
1290 template<typename CallOp>
1291 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode)
1293 auto bytecode = pc->as<CallOp>();
1294 int firstFreeReg = bytecode.m_firstFree.offset();
1295 int firstVarArgOffset = bytecode.m_firstVarArg;
1297 SpeculatedType prediction = getPrediction();
1299 Node* callTarget = get(bytecode.m_callee);
1301 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1302 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1303 m_inlineStackTop->m_baselineMap, m_icContextStack);
1304 refineStatically(callLinkStatus, callTarget);
1306 VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1308 if (callLinkStatus.canOptimize()) {
1309 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget);
1311 if (handleVarargsInlining(callTarget, bytecode.m_dst,
1312 callLinkStatus, firstFreeReg, bytecode.m_thisValue, bytecode.m_arguments,
1313 firstVarArgOffset, op,
1314 InlineCallFrame::varargsKindFor(callMode))) {
1315 if (UNLIKELY(m_graph.compilation()))
1316 m_graph.compilation()->noticeInlinedCall();
1321 CallVarargsData* data = m_graph.m_callVarargsData.add();
1322 data->firstVarArgOffset = firstVarArgOffset;
1324 Node* thisChild = get(bytecode.m_thisValue);
1325 Node* argumentsChild = nullptr;
1326 if (op != TailCallForwardVarargs)
1327 argumentsChild = get(bytecode.m_arguments);
1329 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1330 if (allInlineFramesAreTailCalls()) {
1331 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1334 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1337 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1338 if (bytecode.m_dst.isValid())
1339 set(bytecode.m_dst, call);
1343 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1346 if (thisArgumentReg.isValid())
1347 thisArgument = get(thisArgumentReg);
1349 thisArgument = nullptr;
1352 Node* callTargetForCheck;
1353 if (callee.isClosureCall()) {
1354 calleeCell = callee.executable();
1355 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1357 calleeCell = callee.nonExecutableCallee();
1358 callTargetForCheck = callTarget;
1362 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1364 addToGraph(Phantom, thisArgument);
1367 Node* ByteCodeParser::getArgumentCount()
1369 Node* argumentCount;
1370 if (m_inlineStackTop->m_inlineCallFrame && !m_inlineStackTop->m_inlineCallFrame->isVarargs())
1371 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1373 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(m_inlineStackTop->m_inlineCallFrame), OpInfo(SpecInt32Only));
1374 return argumentCount;
1377 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1379 for (int i = 0; i < argumentCountIncludingThis; ++i)
1380 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1383 template<typename ChecksFunctor>
1384 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, CallVariant callVariant, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& emitFunctionCheckIfNeeded)
1386 if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1389 auto targetExecutable = callVariant.executable();
1390 InlineStackEntry* stackEntry = m_inlineStackTop;
1392 if (targetExecutable != stackEntry->executable())
1394 VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
1396 if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1397 // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1398 // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1399 if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1402 // We are in the machine code entry (i.e. the original caller).
1403 // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1404 if (argumentCountIncludingThis > m_codeBlock->numParameters())
1408 // If an InlineCallFrame is not a closure, it was optimized using a constant callee.
1409 // Check if this is the same callee that we try to inline here.
1410 if (stackEntry->m_inlineCallFrame && !stackEntry->m_inlineCallFrame->isClosureCall) {
1411 if (stackEntry->m_inlineCallFrame->calleeConstant() != callVariant.function())
1415 // We must add some check that the profiling information was correct and the target of this call is what we thought.
1416 emitFunctionCheckIfNeeded();
1417 // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1420 // We must set the callee to the right value
1421 if (stackEntry->m_inlineCallFrame) {
1422 if (stackEntry->m_inlineCallFrame->isClosureCall)
1423 setDirect(stackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)), callTargetNode, NormalSet);
1425 addToGraph(SetCallee, callTargetNode);
1427 // We must set the arguments to the right values
1428 if (!stackEntry->m_inlineCallFrame)
1429 addToGraph(SetArgumentCountIncludingThis, OpInfo(argumentCountIncludingThis));
1431 for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1432 Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1433 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1435 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1436 for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1437 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1439 // We must repeat the work of op_enter here as we will jump right after it.
1440 // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1441 for (int i = 0; i < stackEntry->m_codeBlock->numVars(); ++i)
1442 setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1444 // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1445 unsigned oldIndex = m_currentIndex;
1446 auto oldStackTop = m_inlineStackTop;
1447 m_inlineStackTop = stackEntry;
1448 m_currentIndex = opcodeLengths[op_enter];
1450 processSetLocalQueue();
1451 m_currentIndex = oldIndex;
1452 m_inlineStackTop = oldStackTop;
1455 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
1456 RELEASE_ASSERT(entryBlockPtr);
1457 addJumpTo(*entryBlockPtr);
1459 // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1460 } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1462 // The tail call was not recursive
1466 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1468 CallMode callMode = InlineCallFrame::callModeFor(kind);
1469 CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1470 VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1472 if (m_hasDebuggerEnabled) {
1473 VERBOSE_LOG(" Failing because the debugger is in use.\n");
1477 FunctionExecutable* executable = callee.functionExecutable();
1479 VERBOSE_LOG(" Failing because there is no function executable.\n");
1483 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1484 // being an inline candidate? We might not have a code block (1) if code was thrown away,
1485 // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
1486 // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
1487 // to inline it if we had a static proof of what was being called; this might happen for example
1488 // if you call a global function, where watchpointing gives us static information. Overall,
1489 // it's a rare case because we expect that any hot callees would have already been compiled.
1490 CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1492 VERBOSE_LOG(" Failing because no code block available.\n");
1496 if (!Options::useArityFixupInlining()) {
1497 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1498 VERBOSE_LOG(" Failing because of arity mismatch.\n");
1503 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1504 codeBlock, specializationKind, callee.isClosureCall());
1505 VERBOSE_LOG(" Call mode: ", callMode, "\n");
1506 VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
1507 VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
1508 VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1509 VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1510 VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1511 VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1512 if (!canInline(capabilityLevel)) {
1513 VERBOSE_LOG(" Failing because the function is not inlineable.\n");
1517 // Check if the caller is already too large. We do this check here because that's just
1518 // where we happen to also have the callee's code block, and we want that for the
1519 // purpose of unsetting SABI (the callee's m_shouldAlwaysBeInlined bit).
1520 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1521 codeBlock->m_shouldAlwaysBeInlined = false;
1522 VERBOSE_LOG(" Failing because the caller is too large.\n");
1526 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1528 // https://bugs.webkit.org/show_bug.cgi?id=127627
1530 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1531 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1532 // haven't gotten to Baseline yet. Consider not inlining these functions.
1533 // https://bugs.webkit.org/show_bug.cgi?id=145503
1535 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1536 // too many levels? If either of these is detected, then don't inline. We adjust our
1537 // heuristics if we are dealing with a function that cannot otherwise be compiled.
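// Illustratively, if the inline stack already contains f -> g -> f and we are now considering
// inlining f again, every entry walked below counts toward the depth limit, and the two
// existing frames whose executable matches f's count toward the recursion limit.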
1540 unsigned recursion = 0;
1542 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1544 if (depth >= Options::maximumInliningDepth()) {
1545 VERBOSE_LOG(" Failing because depth exceeded.\n");
1549 if (entry->executable() == executable) {
1551 if (recursion >= Options::maximumInliningRecursion()) {
1552 VERBOSE_LOG(" Failing because recursion detected.\n");
1558 VERBOSE_LOG(" Inlining should be possible.\n");
1560 // It might be possible to inline.
1561 return codeBlock->bytecodeCost();
1564 template<typename ChecksFunctor>
1565 void ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1567 const Instruction* savedCurrentInstruction = m_currentInstruction;
1568 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1570 ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1572 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1573 insertChecks(codeBlock);
1575 // FIXME: Don't flush constants!
1577 // arityFixupCount and numberOfStackPaddingSlots are different. arityFixupCount does not take stack alignment
1578 // into account, while numberOfStackPaddingSlots does. Consider the following case:
1580 // before: [ ... ][arg0][header]
1581 // after: [ ... ][ext ][arg1][arg0][header]
1583 // In the above case, arityFixupCount is 1. But numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1584 // We insert extra slots to keep the stack aligned.
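// A further worked case (illustrative, assuming stackAlignmentRegisters() == 2 and a
// 5-register call header, as on the 64-bit ports): a callee declaring 4 parameters that is
// called with argumentCountIncludingThis == 1 has arityFixupCount == 3, while
// numberOfStackPaddingSlots is roundUp(4 + 5, 2) - roundUp(1 + 5, 2) == 10 - 6 == 4.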
1585 int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1586 int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1587 ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1588 int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
1590 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1593 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1594 CallFrame::headerSizeInRegisters + codeBlock->numCalleeLocals());
1596 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1598 if (result.isValid())
1599 result = m_inlineStackTop->remapOperand(result);
1601 VariableAccessData* calleeVariable = nullptr;
1602 if (callee.isClosureCall()) {
1603 Node* calleeSet = set(
1604 VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1606 calleeVariable = calleeSet->variableAccessData();
1607 calleeVariable->mergeShouldNeverUnbox(true);
1610 InlineStackEntry* callerStackTop = m_inlineStackTop;
1611 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result,
1612 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1614 // This is where the actual inlining really happens.
1615 unsigned oldIndex = m_currentIndex;
1619 case InlineCallFrame::GetterCall:
1620 case InlineCallFrame::SetterCall: {
1621 // When inlining getter and setter calls, we set up a stack frame that does not appear in the bytecode.
1622 // Because inlining can switch on the executable, we could have a graph like this.
1627 // 31: MovHint(loc10)
1628 // 32: SetLocal(loc10)
1629 // 33: MovHint(loc9)
1630 // 34: SetLocal(loc9)
1632 // 37: GetExecutable(@30)
1637 // 42: GetLocal(loc12, bc#7 of caller)
1639 // --> callee: loc9 and loc10 are arguments of callee.
1641 // <HERE, exit to callee, loc9 and loc10 are required in the bytecode>
1643 // When we prune OSR availability at the beginning of BB#2 (bc#7 in the caller), we prune loc9 and loc10's liveness because the caller does not actually have loc9 and loc10.
1644 // However, when we begin executing the callee, we need OSR exit to be aware of where it can recover the arguments to the setter, loc9 and loc10. The MovHints in the inlined
1645 // callee make it so that if we exit at <HERE>, we can recover loc9 and loc10.
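// Concretely (illustrative), for a setter inlined for `o.foo = bar;`, loc9 and loc10 above play
// the role of the receiver and the incoming value; the MovHints emitted below are what let an
// exit inside the setter recover those two arguments.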
1646 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1647 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1648 Node* value = getDirect(argumentToGet);
1649 addToGraph(MovHint, OpInfo(argumentToGet.offset()), value);
1650 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToGet, value, ImmediateNakedSet });
1658 if (arityFixupCount) {
1659 // Note: we do arity fixup in two phases:
1660 // 1. We get all the values we need and MovHint them to the expected locals.
1661 // 2. We SetLocal them after that. This way, if we exit, the callee's
1662 // frame is already set up. If any SetLocal exits, we have a valid exit state.
1663 // This is required because if we didn't do this in two phases, we may exit in
1664 // the middle of arity fixup from the callee's CodeOrigin. This is unsound because the code we
1665 // exit to does not perform arity fixup, so the remaining necessary fixups would never be executed.
1666 // For example, consider if we need to pad two args:
1667 // [arg3][arg2][arg1][arg0]
1668 // [fix ][fix ][arg3][arg2][arg1][arg0]
1669 // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1670 // for arg3's SetLocal in the callee's CodeOrigin, we'd exit with a frame like so:
1671 // [arg3][arg2][arg1][arg2][arg1][arg0]
1672 // Since we do not perform arity fixup in the callee, this is the frame used by the callee.
1673 // And the callee would then just end up thinking its arguments are:
1674 // [fix ][fix ][arg3][arg2][arg1][arg0]
1675 // which is incorrect.
1677 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1678 // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the argument count does not meet the alignment.
1679 // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. Because this argument
1680 // count does not fulfill the stack alignment requirement, we have already inserted an extra slot.
1682 // before: [ ... ][ext ][arg1][arg0][header]
1684 // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1685 // At that time, we can simply reuse this extra slot, so the fixed-up stack is the following.
1687 // before: [ ... ][ext ][arg1][arg0][header]
1688 // after: [ ... ][arg2][arg1][arg0][header]
1690 // In such cases, we do not need to move frames.
1691 if (registerOffsetAfterFixup != registerOffset) {
1692 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1693 VirtualRegister argumentToGet = callerStackTop->remapOperand(virtualRegisterForArgument(index, registerOffset));
1694 Node* value = getDirect(argumentToGet);
1695 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index));
1696 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1697 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1700 for (int index = 0; index < arityFixupCount; ++index) {
1701 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index));
1702 addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1703 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
1706 // At this point, it's OK to OSR exit because we finished setting up
1707 // our callee's frame. We emit an ExitOK below.
1710 // At this point, it's again OK to OSR exit.
1714 processSetLocalQueue();
1716 InlineVariableData inlineVariableData;
1717 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1718 inlineVariableData.argumentPositionStart = argumentPositionStart;
1719 inlineVariableData.calleeVariable = 0;
1722 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1723 == callee.isClosureCall());
1724 if (callee.isClosureCall()) {
1725 RELEASE_ASSERT(calleeVariable);
1726 inlineVariableData.calleeVariable = calleeVariable;
1729 m_graph.m_inlineVariableData.append(inlineVariableData);
1732 clearCaches(); // Reset our state now that we're back to the outer code.
1734 m_currentIndex = oldIndex;
1737 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1739 // Most functions have at least one op_ret and thus set up the continuation block.
1740 // In some rare cases, a function ends in op_unreachable, forcing us to allocate a new continuationBlock here.
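// An illustrative callee of that rare kind:
//
//     function fail(message) { throw new Error(message); }   // hypothetical
//
// Its body never returns normally, so its bytecode ends in op_unreachable and no continuation
// block was set up while parsing it.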
1741 if (inlineStackEntry.m_continuationBlock)
1742 m_currentBlock = inlineStackEntry.m_continuationBlock;
1744 m_currentBlock = allocateUntargetableBlock();
1745 ASSERT(!m_currentBlock->terminal());
1747 prepareToParseBlock();
1748 m_currentInstruction = savedCurrentInstruction;
1751 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
1753 VERBOSE_LOG(" Considering callee ", callee, "\n");
1755 bool didInsertChecks = false;
1756 auto insertChecksWithAccounting = [&] () {
1757 if (needsToCheckCallee)
1758 emitFunctionChecks(callee, callTargetNode, thisArgument);
1759 didInsertChecks = true;
1762 if (kind == InlineCallFrame::TailCall && ByteCodeParser::handleRecursiveTailCall(callTargetNode, callee, registerOffset, argumentCountIncludingThis, insertChecksWithAccounting)) {
1763 RELEASE_ASSERT(didInsertChecks);
1764 return CallOptimizationResult::OptimizedToJump;
1766 RELEASE_ASSERT(!didInsertChecks);
1768 if (!inliningBalance)
1769 return CallOptimizationResult::DidNothing;
1771 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1773 auto endSpecialCase = [&] () {
1774 RELEASE_ASSERT(didInsertChecks);
1775 addToGraph(Phantom, callTargetNode);
1776 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1778 if (continuationBlock) {
1779 m_currentIndex = nextOffset;
1781 processSetLocalQueue();
1782 addJumpTo(continuationBlock);
1786 if (InternalFunction* function = callee.internalFunction()) {
1787 if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1789 return CallOptimizationResult::Inlined;
1791 RELEASE_ASSERT(!didInsertChecks);
1792 return CallOptimizationResult::DidNothing;
1795 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1796 if (intrinsic != NoIntrinsic) {
1797 if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1799 return CallOptimizationResult::Inlined;
1801 RELEASE_ASSERT(!didInsertChecks);
1802 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1805 if (Options::useDOMJIT()) {
1806 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1807 if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1809 return CallOptimizationResult::Inlined;
1811 RELEASE_ASSERT(!didInsertChecks);
1815 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1816 if (myInliningCost > inliningBalance)
1817 return CallOptimizationResult::DidNothing;
1819 auto insertCheck = [&] (CodeBlock*) {
1820 if (needsToCheckCallee)
1821 emitFunctionChecks(callee, callTargetNode, thisArgument);
1823 inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);
1824 inliningBalance -= myInliningCost;
1825 return CallOptimizationResult::Inlined;
1828 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result,
1829 const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument,
1830 VirtualRegister argumentsArgument, unsigned argumentsOffset,
1831 NodeType callOp, InlineCallFrame::Kind kind)
1833 VERBOSE_LOG("Handling inlining (Varargs)...\nStack: ", currentCodeOrigin(), "\n");
1834 if (callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1835 VERBOSE_LOG("Bailing inlining: too many arguments for varargs inlining.\n");
1838 if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1) {
1839 VERBOSE_LOG("Bailing inlining: polymorphic inlining is not yet supported for varargs.\n");
1843 CallVariant callVariant = callLinkStatus[0];
1845 unsigned mandatoryMinimum;
1846 if (FunctionExecutable* functionExecutable = callVariant.functionExecutable())
1847 mandatoryMinimum = functionExecutable->parameterCount();
1849 mandatoryMinimum = 0;
1852 unsigned maxNumArguments = std::max(callLinkStatus.maxNumArguments(), mandatoryMinimum + 1);
1854 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1855 if (inliningCost(callVariant, maxNumArguments, kind) > getInliningBalance(callLinkStatus, specializationKind)) {
1856 VERBOSE_LOG("Bailing inlining: inlining cost too high.\n");
1860 int registerOffset = firstFreeReg + 1;
1861 registerOffset -= maxNumArguments; // includes "this"
1862 registerOffset -= CallFrame::headerSizeInRegisters;
1863 registerOffset = -WTF::roundUpToMultipleOf(stackAlignmentRegisters(), -registerOffset);
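// Worked example (illustrative numbers, assuming stackAlignmentRegisters() == 2 and a
// 5-register call header): with firstFreeReg == -20 and maxNumArguments == 3, this computes
// -19 - 3 - 5 == -27, and the negate-round-negate step yields -roundUpToMultipleOf(2, 27) == -28,
// pushing the offset one register further so the inlined frame stays aligned.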
1865 auto insertChecks = [&] (CodeBlock* codeBlock) {
1866 emitFunctionChecks(callVariant, callTargetNode, thisArgument);
1868 int remappedRegisterOffset =
1869 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1871 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1873 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1874 int remappedArgumentStart = m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1876 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1877 data->start = VirtualRegister(remappedArgumentStart + 1);
1878 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1879 data->offset = argumentsOffset;
1880 data->limit = maxNumArguments;
1881 data->mandatoryMinimum = mandatoryMinimum;
1883 if (callOp == TailCallForwardVarargs)
1884 addToGraph(ForwardVarargs, OpInfo(data));
1886 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1888 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1889 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1890 // callTargetNode because the other 2 are still in use and alive at this point.
1891 addToGraph(Phantom, callTargetNode);
1893 // In DFG IR before SSA, we cannot insert control flow between the
1894 // LoadVarargs and the last SetArgumentDefinitely. This isn't a problem once we get to DFG
1895 // SSA. Fortunately, we also have other reasons for not inserting control flow before SSA.
1898 VariableAccessData* countVariable = newVariableAccessData(VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1899 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1900 // matter very much, since our use of a SetArgumentDefinitely and Flushes for this local slot is
1901 // mostly just a formality.
1902 countVariable->predict(SpecInt32Only);
1903 countVariable->mergeIsProfitableToUnbox(true);
1904 Node* setArgumentCount = addToGraph(SetArgumentDefinitely, OpInfo(countVariable));
1905 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1907 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1908 unsigned numSetArguments = 0;
1909 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1910 VariableAccessData* variable = newVariableAccessData(VirtualRegister(remappedArgumentStart + argument));
1911 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1913 // For a while it had been my intention to do things like this inside the
1914 // prediction injection phase. But in this case it's really best to do it here,
1915 // because it's here that we have access to the variable access datas for the
1916 // inlining we're about to do.
1918 // Something else that's interesting here is that we'd really love to get
1919 // predictions from the arguments loaded at the callsite, rather than the
1920 // arguments received inside the callee. But that probably won't matter for most
1922 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1923 ConcurrentJSLocker locker(codeBlock->m_lock);
1924 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1925 variable->predict(profile.computeUpdatedPrediction(locker));
1928 Node* setArgument = addToGraph(numSetArguments >= mandatoryMinimum ? SetArgumentMaybe : SetArgumentDefinitely, OpInfo(variable));
1929 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1934 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1935 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1936 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1937 // and there are no callsite value profiles, and native functions won't have callee value profiles for
1938 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1939 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1940 // calling LoadVarargs twice.
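// For instance (illustrative), `callee.apply(thisValue, args)` where `callee` is a JS function
// that also has an intrinsic reaches this point: on the varargs path we skip the intrinsic and
// internal-function fast paths entirely and inline the callee's bytecode instead, for the
// reasons given above.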
1941 inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);
1944 VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n");
1948 unsigned ByteCodeParser::getInliningBalance(const CallLinkStatus& callLinkStatus, CodeSpecializationKind specializationKind)
1950 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateBytecodeCost();
1951 if (specializationKind == CodeForConstruct)
1952 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateBytecoodeCost());
1953 if (callLinkStatus.isClosureCall())
1954 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateBytecodeCost());
1955 return inliningBalance;
1958 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining(
1959 Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus,
1960 int registerOffset, VirtualRegister thisArgument,
1961 int argumentCountIncludingThis,
1962 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1964 VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1966 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1967 unsigned inliningBalance = getInliningBalance(callLinkStatus, specializationKind);
1969 // First check if we can avoid creating control flow. Our inliner does some CFG
1970 // simplification on the fly and this helps reduce compile times, but we can only leverage
1971 // this in cases where we don't need control flow diamonds to check the callee.
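// Illustratively, a call site that has only ever observed one callee, e.g.
//
//     function invoke(cb) { return cb(); }   // hypothetical; `cb` always the same function here
//
// takes this branch: we emit the callee check (if needed) and inline straight into the current
// block, with no switch over callees and no continuation diamond.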
1972 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1973 return handleCallVariant(
1974 callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
1975 argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
1978 // We need to create some kind of switch over callee. For now we only do this if we believe that
1979 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1980 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1981 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1982 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1984 if (!m_graph.m_plan.isFTL() || !Options::usePolymorphicCallInlining()) {
1985 VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1986 return CallOptimizationResult::DidNothing;
1989 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1990 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that it will take the slow path.
1992 if (!Options::usePolymorphicCallInliningForNonStubStatus()
1993 && !callLinkStatus.isBasedOnStub()) {
1994 VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1995 return CallOptimizationResult::DidNothing;
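// Illustratively, a call site that alternates between a couple of known callees, e.g.
//
//     compute(flag ? add : mul);   // hypothetical functions `add` and `mul`
//
// reaches the code below: depending on whether the variants are closure calls, we switch either
// on the callee cell itself or on its executable, and inline each case into its own block.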
1998 bool allAreClosureCalls = true;
1999 bool allAreDirectCalls = true;
2000 for (unsigned i = callLinkStatus.size(); i--;) {
2001 if (callLinkStatus[i].isClosureCall())
2002 allAreDirectCalls = false;
2004 allAreClosureCalls = false;
2007 Node* thingToSwitchOn;
2008 if (allAreDirectCalls)
2009 thingToSwitchOn = callTargetNode;
2010 else if (allAreClosureCalls)
2011 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
2013 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
2014 // where it would be beneficial. It might be best to handle these cases as if all calls were closure calls.
2016 // https://bugs.webkit.org/show_bug.cgi?id=136020
2017 VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2018 return CallOptimizationResult::DidNothing;
2021 VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2023 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2024 // store the callee so that it will be accessible to all of the blocks we're about to create. We
2025 // get away with doing an immediate-set here because we wouldn't have performed any side effects yet.
2027 VERBOSE_LOG("Register offset: ", registerOffset);
2028 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2029 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2030 VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2031 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2033 // It's OK to exit right now, even though we set some locals. That's because those locals are not
2038 SwitchData& data = *m_graph.m_switchData.add();
2039 data.kind = SwitchCell;
2040 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2041 m_currentBlock->didLink();
2043 BasicBlock* continuationBlock = allocateUntargetableBlock();
2044 VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2046 // We may force this true if we give up on inlining any of the edges.
2047 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2049 VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2051 unsigned oldOffset = m_currentIndex;
2052 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2053 m_currentIndex = oldOffset;
2054 BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2055 m_currentBlock = calleeEntryBlock;
2056 prepareToParseBlock();
2058 // At the top of each switch case, we can exit.
2061 Node* myCallTargetNode = getDirect(calleeReg);
2063 auto inliningResult = handleCallVariant(
2064 myCallTargetNode, result, callLinkStatus[i], registerOffset,
2065 thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
2066 inliningBalance, continuationBlock, false);
2068 if (inliningResult == CallOptimizationResult::DidNothing) {
2069 // That failed so we let the block die. Nothing interesting should have been added to
2070 // the block. We also give up on inlining any of the (less frequent) callees.
2071 ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2072 m_graph.killBlockAndItsContents(m_currentBlock);
2073 m_graph.m_blocks.removeLast();
2074 VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2076 // The fact that inlining failed means we need a slow path.
2077 couldTakeSlowPath = true;
2081 JSCell* thingToCaseOn;
2082 if (allAreDirectCalls)
2083 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2085 ASSERT(allAreClosureCalls);
2086 thingToCaseOn = callLinkStatus[i].executable();
2088 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2089 VERBOSE_LOG("Finished optimizing ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2093 m_currentBlock = allocateUntargetableBlock();
2094 m_currentIndex = oldOffset;
2096 data.fallThrough = BranchTarget(m_currentBlock);
2097 prepareToParseBlock();
2098 Node* myCallTargetNode = getDirect(calleeReg);
2099 if (couldTakeSlowPath) {
2101 result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2102 registerOffset, prediction);
2103 VERBOSE_LOG("We added a call in the slow path\n");
2105 addToGraph(CheckBadCell);
2106 addToGraph(Phantom, myCallTargetNode);
2107 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2109 set(result, addToGraph(BottomValue));
2110 VERBOSE_LOG("couldTakeSlowPath was false\n");
2113 m_currentIndex = nextOffset;
2114 m_exitOK = true; // Origin changed, so it's fine to exit again.
2115 processSetLocalQueue();
2117 if (Node* terminal = m_currentBlock->terminal())
2118 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2120 addJumpTo(continuationBlock);
2123 prepareToParseBlock();
2125 m_currentIndex = oldOffset;
2126 m_currentBlock = continuationBlock;
2129 VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2130 return CallOptimizationResult::Inlined;
2133 template<typename ChecksFunctor>
2134 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2136 ASSERT(op == ArithMin || op == ArithMax);
2138 if (argumentCountIncludingThis == 1) {
2140 double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2141 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit)))));
2145 if (argumentCountIncludingThis == 2) {
2147 Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2148 addToGraph(Phantom, Edge(resultNode, NumberUse));
2149 set(result, resultNode);
2153 if (argumentCountIncludingThis == 3) {
2155 set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2159 // Don't handle >=3 arguments for now.
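// Summarizing the cases handled above (illustrative): Math.max() folds to the constant
// -Infinity, Math.max(x) becomes a number-speculated use of x, Math.max(x, y) lowers to a
// single ArithMax node, and anything with more arguments falls back to the generic call.
// Math.min is symmetric.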
2163 template<typename ChecksFunctor>
2164 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2166 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
2168 if (!isOpcodeShape<OpCallShape>(m_currentInstruction))
2171 // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2172 // it would only benefit intrinsics called as setters, like if you do:
2174 // o.__defineSetter__("foo", Math.pow)
2176 // Which is extremely amusing, but probably not worth optimizing.
2177 if (!result.isValid())
2180 bool didSetResult = false;
2181 auto setResult = [&] (Node* node) {
2182 RELEASE_ASSERT(!didSetResult);
2184 didSetResult = true;
2187 auto inlineIntrinsic = [&] {
2188 switch (intrinsic) {
2190 // Intrinsic Functions:
2192 case AbsIntrinsic: {
2193 if (argumentCountIncludingThis == 1) { // Math.abs()
2195 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2199 if (!MacroAssembler::supportsFloatingPointAbs())
2203 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2204 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2205 node->mergeFlags(NodeMayOverflowInt32InDFG);
2212 if (handleMinMax(result, intrinsic == MinIntrinsic ? ArithMin : ArithMax, registerOffset, argumentCountIncludingThis, insertChecks)) {
2213 didSetResult = true;
2218 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2219 case capitalizedName##Intrinsic:
2220 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2221 #undef DFG_ARITH_UNARY
2223 if (argumentCountIncludingThis == 1) {
2225 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2228 Arith::UnaryType type = Arith::UnaryType::Sin;
2229 switch (intrinsic) {
2230 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2231 case capitalizedName##Intrinsic: \
2232 type = Arith::UnaryType::capitalizedName; \
2234 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2235 #undef DFG_ARITH_UNARY
2237 RELEASE_ASSERT_NOT_REACHED();
2240 setResult(addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2244 case FRoundIntrinsic:
2245 case SqrtIntrinsic: {
2246 if (argumentCountIncludingThis == 1) {
2248 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2252 NodeType nodeType = Unreachable;
2253 switch (intrinsic) {
2254 case FRoundIntrinsic:
2255 nodeType = ArithFRound;
2258 nodeType = ArithSqrt;
2261 RELEASE_ASSERT_NOT_REACHED();
2264 setResult(addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2268 case PowIntrinsic: {
2269 if (argumentCountIncludingThis < 3) {
2270 // Math.pow() and Math.pow(x) return NaN.
2272 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2276 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2277 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2278 setResult(addToGraph(ArithPow, get(xOperand), get(yOperand)));
2282 case ArrayPushIntrinsic: {
2283 #if USE(JSVALUE32_64)
2285 if (argumentCountIncludingThis > 2)
2290 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2293 ArrayMode arrayMode = getArrayMode(Array::Write);
2294 if (!arrayMode.isJSArray())
2296 switch (arrayMode.type()) {
2299 case Array::Contiguous:
2300 case Array::ArrayStorage: {
2303 addVarArgChild(nullptr); // For storage.
2304 for (int i = 0; i < argumentCountIncludingThis; ++i)
2305 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2306 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2307 setResult(arrayPush);
2316 case ArraySliceIntrinsic: {
2317 #if USE(JSVALUE32_64)
2319 // There aren't enough registers for this to be done easily.
2323 if (argumentCountIncludingThis < 1)
2326 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2327 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2330 ArrayMode arrayMode = getArrayMode(Array::Read);
2331 if (!arrayMode.isJSArray())
2334 if (!arrayMode.isJSArrayWithOriginalStructure())
2337 switch (arrayMode.type()) {
2340 case Array::Contiguous: {
2341 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2343 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2344 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2346 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2347 // https://bugs.webkit.org/show_bug.cgi?id=173171
2348 if (globalObject->arraySpeciesWatchpointSet().state() == IsWatched
2349 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2350 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2351 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2352 && globalObject->arrayPrototypeChainIsSane()) {
2354 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpointSet());
2355 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2356 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2357 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2361 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2362 // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
2363 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2364 // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2365 // that if we're an original array structure). (We can relax this in the future by using
2366 // TryGetById and CheckCell.)
2368 // 2. We check that the array we're calling slice on has the same global object as the lexical
2369 // global object that this code is running in. This requirement is necessary because we set up the
2370 // watchpoints above on the lexical global object. This means that code that calls slice on
2371 // arrays produced by other global objects won't get this optimization. We could relax this
2372 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2373 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2375 // 3. By proving we're an original array structure, we guarantee that the incoming array
2376 // isn't a subclass of Array.
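// Illustratively, `array.slice(1)` on a plain `[1, 2, 3]` literal satisfies these structure
// requirements and takes the fast path below, whereas an instance of a hypothetical
// `class Tagged extends Array {}` does not, so it keeps the fully observable species lookup.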
2378 StructureSet structureSet;
2379 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2380 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2381 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2382 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithInt32));
2383 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
2384 structureSet.add(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithDouble));
2385 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
2387 addVarArgChild(array);
2388 if (argumentCountIncludingThis >= 2)
2389 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2390 if (argumentCountIncludingThis >= 3)
2391 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2392 addVarArgChild(addToGraph(GetButterfly, array));
2394 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2395 setResult(arraySlice);
2405 RELEASE_ASSERT_NOT_REACHED();
2409 case ArrayIndexOfIntrinsic: {
2410 if (argumentCountIncludingThis < 2)
2413 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2414 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2415 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2416 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2419 ArrayMode arrayMode = getArrayMode(Array::Read);
2420 if (!arrayMode.isJSArray())
2423 if (!arrayMode.isJSArrayWithOriginalStructure())
2426 // We do not want to convert arrays into one type just to perform indexOf.
2427 if (arrayMode.doesConversion())
2430 switch (arrayMode.type()) {
2433 case Array::Contiguous: {
2434 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2436 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure(*m_vm);
2437 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(*m_vm);
2439 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2440 // https://bugs.webkit.org/show_bug.cgi?id=173171
2441 if (arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2442 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2443 && globalObject->arrayPrototypeChainIsSane()) {
2445 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2446 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2450 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2451 addVarArgChild(array);
2452 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2453 if (argumentCountIncludingThis >= 3)
2454 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2455 addVarArgChild(nullptr);
2457 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2468 RELEASE_ASSERT_NOT_REACHED();
2473 case ArrayPopIntrinsic: {
2474 if (argumentCountIncludingThis != 1)
2477 ArrayMode arrayMode = getArrayMode(Array::Write);
2478 if (!arrayMode.isJSArray())
2480 switch (arrayMode.type()) {
2483 case Array::Contiguous:
2484 case Array::ArrayStorage: {
2486 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2487 setResult(arrayPop);
2496 case AtomicsAddIntrinsic:
2497 case AtomicsAndIntrinsic:
2498 case AtomicsCompareExchangeIntrinsic:
2499 case AtomicsExchangeIntrinsic:
2500 case AtomicsIsLockFreeIntrinsic:
2501 case AtomicsLoadIntrinsic:
2502 case AtomicsOrIntrinsic:
2503 case AtomicsStoreIntrinsic:
2504 case AtomicsSubIntrinsic:
2505 case AtomicsXorIntrinsic: {
2509 NodeType op = LastNodeType;
2510 Array::Action action = Array::Write;
2511 unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2512 switch (intrinsic) {
2513 case AtomicsAddIntrinsic:
2517 case AtomicsAndIntrinsic:
2521 case AtomicsCompareExchangeIntrinsic:
2522 op = AtomicsCompareExchange;
2525 case AtomicsExchangeIntrinsic:
2526 op = AtomicsExchange;
2529 case AtomicsIsLockFreeIntrinsic:
2530 // This gets no backing store, but we need no special logic for this since this also does
2531 // not need varargs.
2532 op = AtomicsIsLockFree;
2535 case AtomicsLoadIntrinsic:
2538 action = Array::Read;
2540 case AtomicsOrIntrinsic:
2544 case AtomicsStoreIntrinsic:
2548 case AtomicsSubIntrinsic:
2552 case AtomicsXorIntrinsic:
2557 RELEASE_ASSERT_NOT_REACHED();
2561 if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2566 Vector<Node*, 3> args;
2567 for (unsigned i = 0; i < numArgs; ++i)
2568 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2571 if (numArgs + 1 <= 3) {
2572 while (args.size() < 3)
2573 args.append(nullptr);
2574 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2576 for (Node* node : args)
2577 addVarArgChild(node);
2578 addVarArgChild(nullptr);
2579 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));
2582 setResult(resultNode);
2586 case ParseIntIntrinsic: {
2587 if (argumentCountIncludingThis < 2)
2590 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2594 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2596 if (argumentCountIncludingThis == 2)
2597 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2599 ASSERT(argumentCountIncludingThis > 2);
2600 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2601 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2603 setResult(parseInt);
2607 case CharCodeAtIntrinsic: {
2608 if (argumentCountIncludingThis != 2)
2612 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2613 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2614 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2616 setResult(charCode);
2620 case CharAtIntrinsic: {
2621 if (argumentCountIncludingThis != 2)
2625 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2626 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2627 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand));
2629 setResult(charCode);
2632 case Clz32Intrinsic: {
2634 if (argumentCountIncludingThis == 1)
2635 setResult(addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2637 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2638 setResult(addToGraph(ArithClz32, operand));
2642 case FromCharCodeIntrinsic: {
2643 if (argumentCountIncludingThis != 2)
2647 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2648 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2650 setResult(charCode);
2655 case RegExpExecIntrinsic: {
2656 if (argumentCountIncludingThis != 2)
2660 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2661 setResult(regExpExec);
2666 case RegExpTestIntrinsic:
2667 case RegExpTestFastIntrinsic: {
2668 if (argumentCountIncludingThis != 2)
2671 if (intrinsic == RegExpTestIntrinsic) {
2672 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2673 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2676 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2677 Structure* regExpStructure = globalObject->regExpStructure();
2678 m_graph.registerStructure(regExpStructure);
2679 ASSERT(regExpStructure->storedPrototype().isObject());
2680 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2682 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2683 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2685 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2686 JSValue currentProperty;
2687 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2690 return currentProperty == primordialProperty;
2693 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2694 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2697 // Check that regExpObject is actually a RegExp object.
2698 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2699 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2701 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2702 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2703 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2704 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2705 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2706 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
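// These runtime checks are what keep the fast path sound against later monkey-patching: for
// example (illustrative), after `RegExp.prototype.exec = function () { return null; };` the
// CheckCell above no longer sees the primordial exec, so we exit rather than run RegExpTest
// under stale assumptions.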
2710 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2711 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2712 setResult(regExpExec);
2717 case RegExpMatchFastIntrinsic: {
2718 RELEASE_ASSERT(argumentCountIncludingThis == 2);
2721 Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2722 setResult(regExpMatch);
2726 case ObjectCreateIntrinsic: {
2727 if (argumentCountIncludingThis != 2)
2731 setResult(addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));
2735 case ObjectGetPrototypeOfIntrinsic: {
2736 if (argumentCountIncludingThis != 2)
2740 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2744 case ObjectIsIntrinsic: {
2745 if (argumentCountIncludingThis < 3)
2749 setResult(addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2753 case ObjectKeysIntrinsic: {
2754 if (argumentCountIncludingThis < 2)
2758 setResult(addToGraph(ObjectKeys, get(virtualRegisterForArgument(1, registerOffset))));
2762 case ReflectGetPrototypeOfIntrinsic: {
2763 if (argumentCountIncludingThis != 2)
2767 setResult(addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2771 case IsTypedArrayViewIntrinsic: {
2772 ASSERT(argumentCountIncludingThis == 2);
2775 setResult(addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2779 case StringPrototypeValueOfIntrinsic: {
2781 Node* value = get(virtualRegisterForArgument(0, registerOffset));
2782 setResult(addToGraph(StringValueOf, value));
2786 case StringPrototypeReplaceIntrinsic: {
2787 if (argumentCountIncludingThis != 3)
2790 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2791 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2794 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2795 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2798 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2799 Structure* regExpStructure = globalObject->regExpStructure();
2800 m_graph.registerStructure(regExpStructure);
2801 ASSERT(regExpStructure->storedPrototype().isObject());
2802 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2804 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2805 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2807 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2808 JSValue currentProperty;
2809 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2812 return currentProperty == primordialProperty;
2815 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2816 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2819 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2820 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2823 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2824 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2827 // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2828 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2833 Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2834 setResult(resultNode);
2838 case StringPrototypeReplaceRegExpIntrinsic: {
2839 if (argumentCountIncludingThis != 3)
2843 Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2844 setResult(resultNode);
2848 case RoundIntrinsic:
2849 case FloorIntrinsic:
2851 case TruncIntrinsic: {
2852 if (argumentCountIncludingThis == 1) {
2854 setResult(addToGraph(JSConstant, OpInfo(m_constantNaN)));
2858 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2860 if (intrinsic == RoundIntrinsic)
2862 else if (intrinsic == FloorIntrinsic)
2864 else if (intrinsic == CeilIntrinsic)
2867 ASSERT(intrinsic == TruncIntrinsic);
2870 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2871 setResult(roundNode);
2874 case IMulIntrinsic: {
2875 if (argumentCountIncludingThis != 3)
2878 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2879 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2880 Node* left = get(leftOperand);
2881 Node* right = get(rightOperand);
2882 setResult(addToGraph(ArithIMul, left, right));
2886 case RandomIntrinsic: {
2887 if (argumentCountIncludingThis != 1)
2890 setResult(addToGraph(ArithRandom));
2894 case DFGTrueIntrinsic: {
2896 setResult(jsConstant(jsBoolean(true)));
2900 case FTLTrueIntrinsic: {
2902 setResult(jsConstant(jsBoolean(m_graph.m_plan.isFTL())));
2906 case OSRExitIntrinsic: {
2908 addToGraph(ForceOSRExit);
2909 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2913 case IsFinalTierIntrinsic: {
2915 setResult(jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true)));
2919 case SetInt32HeapPredictionIntrinsic: {
2921 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2922 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2923 if (node->hasHeapPrediction())
2924 node->setHeapPrediction(SpecInt32Only);
2926 setResult(addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2930 case CheckInt32Intrinsic: {
2932 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2933 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2934 addToGraph(Phantom, Edge(node, Int32Use));
2936 setResult(jsConstant(jsBoolean(true)));
2940 case FiatInt52Intrinsic: {
2941 if (argumentCountIncludingThis != 2)
2944 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2946 setResult(addToGraph(FiatInt52, get(operand)));
2948 setResult(get(operand));
2952 case JSMapGetIntrinsic: {
2953 if (argumentCountIncludingThis != 2)
2957 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2958 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2959 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2960 Node* hash = addToGraph(MapHash, normalizedKey);
2961 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2962 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2963 setResult(resultNode);
2967 case JSSetHasIntrinsic:
2968 case JSMapHasIntrinsic: {
2969 if (argumentCountIncludingThis != 2)
2973 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2974 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2975 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2976 Node* hash = addToGraph(MapHash, normalizedKey);
2977 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2978 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2979 JSCell* sentinel = nullptr;
2980 if (intrinsic == JSMapHasIntrinsic)
2981 sentinel = m_vm->sentinelMapBucket();
2983 sentinel = m_vm->sentinelSetBucket();
2985 FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2986 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2987 Node* resultNode = addToGraph(LogicalNot, invertedResult);
2988 setResult(resultNode);
2992 case JSSetAddIntrinsic: {
2993 if (argumentCountIncludingThis != 2)
2997 Node* base = get(virtualRegisterForArgument(0, registerOffset));
2998 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2999 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3000 Node* hash = addToGraph(MapHash, normalizedKey);
3001 addToGraph(SetAdd, base, normalizedKey, hash);
3006 case JSMapSetIntrinsic: {
3007 if (argumentCountIncludingThis != 3)
3011 Node* base = get(virtualRegisterForArgument(0, registerOffset));
3012 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3013 Node* value = get(virtualRegisterForArgument(2, registerOffset));
3015 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3016 Node* hash = addToGraph(MapHash, normalizedKey);
3018 addVarArgChild(base);
3019 addVarArgChild(normalizedKey);
3020 addVarArgChild(value);
3021 addVarArgChild(hash);
3022 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
3027 case JSSetBucketHeadIntrinsic:
3028 case JSMapBucketHeadIntrinsic: {
3029 ASSERT(argumentCountIncludingThis == 2);
3032 Node* map = get(virtualRegisterForArgument(1, registerOffset));
3033 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
3034 Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind));
3035 setResult(resultNode);
3039 case JSSetBucketNextIntrinsic:
3040 case JSMapBucketNextIntrinsic: {
3041 ASSERT(argumentCountIncludingThis == 2);
3044 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3045 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3046 Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
3047 setResult(resultNode);
3051 case JSSetBucketKeyIntrinsic:
3052 case JSMapBucketKeyIntrinsic: {
3053 ASSERT(argumentCountIncludingThis == 2);
3056 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3057 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
3058 Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
3059 setResult(resultNode);
3063 case JSMapBucketValueIntrinsic: {
3064 ASSERT(argumentCountIncludingThis == 2);
3067 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
3068 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
3069 setResult(resultNode);
3073 case JSWeakMapGetIntrinsic: {
3074 if (argumentCountIncludingThis != 2)
3077 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3081 Node* map = get(virtualRegisterForArgument(0, registerOffset));
3082 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3083 addToGraph(Check, Edge(key, ObjectUse));
3084 Node* hash = addToGraph(MapHash, key);
3085 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use));
3086 Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);
3088 setResult(resultNode);
3092 case JSWeakMapHasIntrinsic: {
3093 if (argumentCountIncludingThis != 2)
3096 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))