2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGByteCodeParser.h"
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "BytecodeStructs.h"
35 #include "CallLinkStatus.h"
36 #include "CodeBlock.h"
37 #include "CodeBlockWithJITType.h"
38 #include "CommonSlowPaths.h"
39 #include "DFGAbstractHeap.h"
40 #include "DFGArrayMode.h"
42 #include "DFGCapabilities.h"
43 #include "DFGClobberize.h"
44 #include "DFGClobbersExitState.h"
46 #include "DFGJITCode.h"
47 #include "FunctionCodeBlock.h"
48 #include "GetByIdStatus.h"
50 #include "JSCInlines.h"
51 #include "JSModuleEnvironment.h"
52 #include "JSModuleNamespaceObject.h"
53 #include "NumberConstructor.h"
54 #include "ObjectConstructor.h"
55 #include "PreciseJumpTargets.h"
56 #include "PutByIdFlags.h"
57 #include "PutByIdStatus.h"
58 #include "RegExpPrototype.h"
59 #include "StackAlignment.h"
60 #include "StringConstructor.h"
61 #include "StructureStubInfo.h"
63 #include <wtf/CommaPrinter.h>
64 #include <wtf/HashMap.h>
65 #include <wtf/MathExtras.h>
66 #include <wtf/SetForScope.h>
67 #include <wtf/StdLibExtras.h>
69 namespace JSC { namespace DFG {
namespace DFGByteCodeParserInternal {
#ifdef NDEBUG
static const bool verbose = false;
#else
static const bool verbose = true;
#endif
} // namespace DFGByteCodeParserInternal
#define VERBOSE_LOG(...) do { \
    if (DFGByteCodeParserInternal::verbose && Options::verboseDFGBytecodeParsing()) \
        dataLog(__VA_ARGS__); \
} while (false)
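// Example usage (as seen in the parser code below):
//     VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
// The log only fires when both the compile-time flag above and the
// Options::verboseDFGBytecodeParsing() runtime option are enabled.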
84 class ConstantBufferKey {
92 ConstantBufferKey(WTF::HashTableDeletedValueType)
98 ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
99 : m_codeBlock(codeBlock)
104 bool operator==(const ConstantBufferKey& other) const
106 return m_codeBlock == other.m_codeBlock
107 && m_index == other.m_index;
110 unsigned hash() const
112 return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
115 bool isHashTableDeletedValue() const
117 return !m_codeBlock && m_index;
120 CodeBlock* codeBlock() const { return m_codeBlock; }
121 unsigned index() const { return m_index; }
private:
    CodeBlock* m_codeBlock;
    unsigned m_index;
};
128 struct ConstantBufferKeyHash {
129 static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
{
    return a == b;
}
135 static const bool safeToCompareToEmptyOrDeleted = true;
138 } } // namespace JSC::DFG
142 template<typename T> struct DefaultHash;
143 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
144 typedef JSC::DFG::ConstantBufferKeyHash Hash;
147 template<typename T> struct HashTraits;
148 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
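// Note: these WTF specializations are what allow ConstantBufferKey to be used as a
// HashMap key; the parser relies on this below for its
// HashMap<ConstantBufferKey, unsigned> m_constantBufferCache member.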
152 namespace JSC { namespace DFG {
154 // === ByteCodeParser ===
156 // This class is used to compile the dataflow graph from a CodeBlock.
157 class ByteCodeParser {
159 ByteCodeParser(Graph& graph)
161 , m_codeBlock(graph.m_codeBlock)
162 , m_profiledBlock(graph.m_profiledBlock)
166 , m_constantUndefined(graph.freeze(jsUndefined()))
167 , m_constantNull(graph.freeze(jsNull()))
168 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
169 , m_constantOne(graph.freeze(jsNumber(1)))
170 , m_numArguments(m_codeBlock->numParameters())
171 , m_numLocals(m_codeBlock->m_numCalleeLocals)
172 , m_parameterSlots(0)
173 , m_numPassedVarArgs(0)
174 , m_inlineStackTop(0)
175 , m_currentInstruction(0)
176 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
178 ASSERT(m_profiledBlock);
181 // Parse a full CodeBlock of bytecode.
185 struct InlineStackEntry;
187 // Just parse from m_currentIndex to the end of the current CodeBlock.
188 void parseCodeBlock();
190 void ensureLocals(unsigned newNumLocals)
192 VERBOSE_LOG(" ensureLocals: trying to raise m_numLocals from ", m_numLocals, " to ", newNumLocals, "\n");
193 if (newNumLocals <= m_numLocals)
195 m_numLocals = newNumLocals;
196 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
197 m_graph.block(i)->ensureLocals(newNumLocals);
200 // Helper for min and max.
201 template<typename ChecksFunctor>
202 bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
204 void refineStatically(CallLinkStatus&, Node* callTarget);
205 // Blocks can either be targetable (i.e. in the m_blockLinkingTargets of one InlineStackEntry) with a well-defined bytecodeBegin,
206 // or they can be untargetable, with bytecodeBegin==UINT_MAX, to be managed manually and not by the linkBlock machinery.
207 // This is used most notably when doing polyvariant inlining (it requires a fair bit of control-flow with no bytecode analog).
208 // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
209 // than to move the right index all the way to the treatment of op_ret.
210 BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
211 BasicBlock* allocateUntargetableBlock();
// An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction.
213 void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
214 void addJumpTo(BasicBlock*);
215 void addJumpTo(unsigned bytecodeIndex);
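// Illustrative sketch of the two flavours (not from the original source): a block that
// begins at a known bytecode offset is created with allocateTargetableBlock(bytecodeIndex)
// so that linkBlock() can resolve jumps to it by offset, whereas something like an
// inlining continuation starts out untargetable:
//     BasicBlock* continuation = allocateUntargetableBlock();
//     ...
//     makeBlockTargetable(continuation, nextOffset); // pinned to an offset, exactly once
//     addJumpTo(continuation);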
216 // Handle calls. This resolves issues surrounding inlining and intrinsics.
217 enum Terminality { Terminal, NonTerminal };
218 Terminality handleCall(
219 int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
220 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus,
221 SpeculatedType prediction);
222 Terminality handleCall(Instruction* pc, NodeType op, CallMode);
223 Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode);
void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
225 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
226 Node* getArgumentCount();
227 bool handleRecursiveTailCall(Node* callTargetNode, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis);
228 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
229 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
230 bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
231 template<typename ChecksFunctor>
232 bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
233 template<typename ChecksFunctor>
234 void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
235 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
236 template<typename ChecksFunctor>
237 bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
238 template<typename ChecksFunctor>
239 bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
240 template<typename ChecksFunctor>
241 bool handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
242 template<typename ChecksFunctor>
243 bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
244 template<typename ChecksFunctor>
245 bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
246 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value);
247 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset);
248 bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
249 bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus);
251 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
252 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
253 ObjectPropertyCondition presenceLike(
254 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
256 // Attempt to watch the presence of a property. It will watch that the property is present in the same
257 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
258 // Returns true if this all works out.
259 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
260 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
262 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
263 template<typename VariantType>
264 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);

void handleGetById(
    int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
void emitPutById(
    Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
void handlePutById(
    Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
    bool isDirect);
276 // Either register a watchpoint or emit a check for this condition. Returns false if the
277 // condition no longer holds, and therefore no reasonable check can be emitted.
278 bool check(const ObjectPropertyCondition&);
280 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
282 // Either register a watchpoint or emit a check for this condition. It must be a Presence
283 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
284 // Emits code for the loaded value that the condition guards, and returns a node containing
285 // the loaded value. Returns null if the condition no longer holds.
286 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
287 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
288 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
290 // Calls check() for each condition in the set: that is, it either emits checks or registers
291 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
292 // conditions are no longer checkable, returns false.
293 bool check(const ObjectPropertyConditionSet&);
295 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
296 // base. Does a combination of watchpoint registration and check emission to guard the
297 // conditions, and emits code to load the value from the slot base. Returns a node containing
298 // the loaded value. Returns null if any of the conditions were no longer checkable.
299 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
300 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
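// For example (an illustrative reading, not original text): a monomorphic get of a
// property that lives on the prototype would check() the conditions along the chain and
// planLoad() the presence condition on the prototype itself, yielding the
// GetByOffsetMethod used to emit the actual load.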
302 void prepareToParseBlock();
305 // Parse a single basic block of bytecode instructions.
306 void parseBlock(unsigned limit);
307 // Link block successors.
308 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
309 void linkBlocks(Vector<BasicBlock*>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
311 VariableAccessData* newVariableAccessData(VirtualRegister operand)
313 ASSERT(!operand.isConstant());
315 m_graph.m_variableAccessData.append(VariableAccessData(operand));
316 return &m_graph.m_variableAccessData.last();
319 // Get/Set the operands/result of a bytecode instruction.
320 Node* getDirect(VirtualRegister operand)
322 ASSERT(!operand.isConstant());
324 // Is this an argument?
325 if (operand.isArgument())
326 return getArgument(operand);
329 return getLocal(operand);
332 Node* get(VirtualRegister operand)
334 if (operand.isConstant()) {
335 unsigned constantIndex = operand.toConstantIndex();
336 unsigned oldSize = m_constants.size();
337 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
338 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
339 JSValue value = codeBlock.getConstant(operand.offset());
340 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
341 if (constantIndex >= oldSize) {
342 m_constants.grow(constantIndex + 1);
343 for (unsigned i = oldSize; i < m_constants.size(); ++i)
344 m_constants[i] = nullptr;
347 Node* constantNode = nullptr;
348 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
349 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
351 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
352 m_constants[constantIndex] = constantNode;
354 ASSERT(m_constants[constantIndex]);
355 return m_constants[constantIndex];
358 if (inlineCallFrame()) {
359 if (!inlineCallFrame()->isClosureCall) {
360 JSFunction* callee = inlineCallFrame()->calleeConstant();
361 if (operand.offset() == CallFrameSlot::callee)
362 return weakJSConstant(callee);
364 } else if (operand.offset() == CallFrameSlot::callee) {
365 // We have to do some constant-folding here because this enables CreateThis folding. Note
366 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
367 // case if the function is a singleton then we already know it.
368 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
369 InferredValue* singleton = executable->singletonFunction();
370 if (JSValue value = singleton->inferredValue()) {
371 m_graph.watchpoints().addLazily(singleton);
372 JSFunction* function = jsCast<JSFunction*>(value);
373 return weakJSConstant(function);
376 return addToGraph(GetCallee);
379 return getDirect(m_inlineStackTop->remapOperand(operand));
}

enum SetMode {
    // A normal set which follows a two-phase commit that spans code origins. During
    // the current code origin it issues a MovHint, and at the start of the next
    // code origin there will be a SetLocal. If the local needs flushing, the second
    // SetLocal will be preceded with a Flush.
    NormalSet,

    // A set where the SetLocal happens immediately and there is still a Flush. This
    // is relevant when assigning to a local in tricky situations for the delayed
    // SetLocal logic but where we know that we have not performed any side effects
    // within this code origin. This is a safe replacement for NormalSet anytime we
    // know that we have not yet performed side effects in this code origin.
    ImmediateSetWithFlush,

    // A set where the SetLocal happens immediately and we do not Flush it even if
    // this is a local that is marked as needing it. This is relevant when
    // initializing locals at the top of a function.
    ImmediateNakedSet
};
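// Illustrative example (not from the original source): initializing locals at the top of
// a function can use the naked form, since nothing can have observed the old value yet:
//     setDirect(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
// whereas an ordinary bytecode-level assignment uses the NormalSet default, so the
// MovHint/SetLocal two-phase commit (and any required Flush) happens automatically.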
401 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
403 addToGraph(MovHint, OpInfo(operand.offset()), value);
405 // We can't exit anymore because our OSR exit state has changed.
408 DelayedSetLocal delayed(currentCodeOrigin(), operand, value, setMode);
410 if (setMode == NormalSet) {
411 m_setLocalQueue.append(delayed);
415 return delayed.execute(this);
418 void processSetLocalQueue()
420 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
421 m_setLocalQueue[i].execute(this);
422 m_setLocalQueue.shrink(0);
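// A sketch of how the queue is typically used (see the recursive tail call handling
// below for a concrete instance): a bytecode op performs several setDirect() calls, which
// enqueue DelayedSetLocals under NormalSet; once m_currentIndex has moved on to the next
// instruction, processSetLocalQueue() is called so that the SetLocals (and any Flushes)
// are emitted in the new code origin.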
425 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
427 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
430 Node* injectLazyOperandSpeculation(Node* node)
432 ASSERT(node->op() == GetLocal);
433 ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
434 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
435 LazyOperandValueProfileKey key(m_currentIndex, node->local());
436 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
437 node->variableAccessData()->predict(prediction);
441 // Used in implementing get/set, above, where the operand is a local variable.
442 Node* getLocal(VirtualRegister operand)
444 unsigned local = operand.toLocal();
446 Node* node = m_currentBlock->variablesAtTail.local(local);
448 // This has two goals: 1) link together variable access datas, and 2)
449 // try to avoid creating redundant GetLocals. (1) is required for
450 // correctness - no other phase will ensure that block-local variable
451 // access data unification is done correctly. (2) is purely opportunistic
// and is meant as a compile-time optimization only.
454 VariableAccessData* variable;
457 variable = node->variableAccessData();
459 switch (node->op()) {
463 return node->child1().node();
468 variable = newVariableAccessData(operand);
470 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
471 m_currentBlock->variablesAtTail.local(local) = node;
474 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
476 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
478 unsigned local = operand.toLocal();
480 if (setMode != ImmediateNakedSet) {
481 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
482 if (argumentPosition)
483 flushDirect(operand, argumentPosition);
484 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
488 VariableAccessData* variableAccessData = newVariableAccessData(operand);
489 variableAccessData->mergeStructureCheckHoistingFailed(
490 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
491 variableAccessData->mergeCheckArrayHoistingFailed(
492 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
493 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
494 m_currentBlock->variablesAtTail.local(local) = node;
498 // Used in implementing get/set, above, where the operand is an argument.
499 Node* getArgument(VirtualRegister operand)
501 unsigned argument = operand.toArgument();
502 ASSERT(argument < m_numArguments);
504 Node* node = m_currentBlock->variablesAtTail.argument(argument);
506 VariableAccessData* variable;
509 variable = node->variableAccessData();
511 switch (node->op()) {
515 return node->child1().node();
520 variable = newVariableAccessData(operand);
522 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
523 m_currentBlock->variablesAtTail.argument(argument) = node;
526 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
528 SetForScope<CodeOrigin> originChange(m_currentSemanticOrigin, semanticOrigin);
530 unsigned argument = operand.toArgument();
531 ASSERT(argument < m_numArguments);
533 VariableAccessData* variableAccessData = newVariableAccessData(operand);
535 // Always flush arguments, except for 'this'. If 'this' is created by us,
536 // then make sure that it's never unboxed.
537 if (argument || m_graph.needsFlushedThis()) {
538 if (setMode != ImmediateNakedSet)
539 flushDirect(operand);
542 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
543 variableAccessData->mergeShouldNeverUnbox(true);
545 variableAccessData->mergeStructureCheckHoistingFailed(
546 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
547 variableAccessData->mergeCheckArrayHoistingFailed(
548 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
549 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
550 m_currentBlock->variablesAtTail.argument(argument) = node;
554 ArgumentPosition* findArgumentPositionForArgument(int argument)
556 InlineStackEntry* stack = m_inlineStackTop;
557 while (stack->m_inlineCallFrame)
558 stack = stack->m_caller;
559 return stack->m_argumentPositions[argument];
562 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
564 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
565 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
566 if (!inlineCallFrame)
568 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
570 if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
572 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->argumentsWithFixup.size()))
574 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
575 return stack->m_argumentPositions[argument];
580 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
582 if (operand.isArgument())
583 return findArgumentPositionForArgument(operand.toArgument());
584 return findArgumentPositionForLocal(operand);
587 void flush(VirtualRegister operand)
589 flushDirect(m_inlineStackTop->remapOperand(operand));
592 void flushDirect(VirtualRegister operand)
594 flushDirect(operand, findArgumentPosition(operand));
597 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
599 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
602 template<NodeType nodeType>
603 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
605 ASSERT(!operand.isConstant());
607 Node* node = m_currentBlock->variablesAtTail.operand(operand);
609 VariableAccessData* variable;
612 variable = node->variableAccessData();
614 variable = newVariableAccessData(operand);
616 node = addToGraph(nodeType, OpInfo(variable));
617 m_currentBlock->variablesAtTail.operand(operand) = node;
618 if (argumentPosition)
619 argumentPosition->addVariable(variable);
622 void phantomLocalDirect(VirtualRegister operand)
624 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
627 void flush(InlineStackEntry* inlineStackEntry)
630 if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
631 ASSERT(!m_hasDebuggerEnabled);
632 numArguments = inlineCallFrame->argumentsWithFixup.size();
633 if (inlineCallFrame->isClosureCall)
634 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)));
635 if (inlineCallFrame->isVarargs())
636 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(CallFrameSlot::argumentCount)));
638 numArguments = inlineStackEntry->m_codeBlock->numParameters();
639 for (unsigned argument = numArguments; argument-- > 1;)
640 flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
641 if (!inlineStackEntry->m_inlineCallFrame && m_graph.needsFlushedThis())
642 flushDirect(virtualRegisterForArgument(0));
644 phantomLocalDirect(virtualRegisterForArgument(0));
646 if (m_graph.needsScopeRegister())
647 flushDirect(m_codeBlock->scopeRegister());
650 void flushForTerminal()
652 CodeOrigin origin = currentCodeOrigin();
653 unsigned bytecodeIndex = origin.bytecodeIndex;
655 for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) {
656 flush(inlineStackEntry);
658 ASSERT(origin.inlineCallFrame == inlineStackEntry->m_inlineCallFrame);
659 InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame;
660 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
661 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
662 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
664 for (unsigned local = codeBlock->m_numCalleeLocals; local--;) {
665 if (livenessAtBytecode[local]) {
666 VirtualRegister reg = virtualRegisterForLocal(local);
668 reg = inlineStackEntry->remapOperand(reg);
669 phantomLocalDirect(reg);
673 if (inlineCallFrame) {
674 bytecodeIndex = inlineCallFrame->directCaller.bytecodeIndex;
675 origin = inlineCallFrame->directCaller;
680 void flushForReturn()
682 flush(m_inlineStackTop);
685 void flushIfTerminal(SwitchData& data)
687 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
690 for (unsigned i = data.cases.size(); i--;) {
691 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
698 // Assumes that the constant should be strongly marked.
699 Node* jsConstant(JSValue constantValue)
701 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
704 Node* weakJSConstant(JSValue constantValue)
706 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
709 // Helper functions to get/set the this value.
712 return get(m_inlineStackTop->m_codeBlock->thisRegister());
715 void setThis(Node* value)
717 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
720 InlineCallFrame* inlineCallFrame()
722 return m_inlineStackTop->m_inlineCallFrame;
725 bool allInlineFramesAreTailCalls()
727 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
730 CodeOrigin currentCodeOrigin()
732 return CodeOrigin(m_currentIndex, inlineCallFrame());
735 NodeOrigin currentNodeOrigin()
740 if (m_currentSemanticOrigin.isSet())
741 semantic = m_currentSemanticOrigin;
743 semantic = currentCodeOrigin();
745 forExit = currentCodeOrigin();
747 return NodeOrigin(semantic, forExit, m_exitOK);
750 BranchData* branchData(unsigned taken, unsigned notTaken)
752 // We assume that branches originating from bytecode always have a fall-through. We
753 // use this assumption to avoid checking for the creation of terminal blocks.
754 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
755 BranchData* data = m_graph.m_branchData.add();
756 *data = BranchData::withBytecodeIndices(taken, notTaken);
760 Node* addToGraph(Node* node)
762 VERBOSE_LOG(" appended ", node, " ", Graph::opName(node->op()), "\n");
763 m_currentBlock->append(node);
764 if (clobbersExitState(m_graph, node))
769 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
771 Node* result = m_graph.addNode(
772 op, currentNodeOrigin(), Edge(child1), Edge(child2),
774 return addToGraph(result);
776 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
778 Node* result = m_graph.addNode(
779 op, currentNodeOrigin(), child1, child2, child3);
780 return addToGraph(result);
782 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
784 Node* result = m_graph.addNode(
785 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
787 return addToGraph(result);
789 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
791 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
792 return addToGraph(result);
794 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
796 Node* result = m_graph.addNode(
797 op, currentNodeOrigin(), info1, info2,
798 Edge(child1), Edge(child2), Edge(child3));
799 return addToGraph(result);
801 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
803 Node* result = m_graph.addNode(
804 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
805 return addToGraph(result);
808 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
810 Node* result = m_graph.addNode(
811 Node::VarArg, op, currentNodeOrigin(), info1, info2,
812 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
815 m_numPassedVarArgs = 0;
820 void addVarArgChild(Node* child)
822 m_graph.m_varArgChildren.append(Edge(child));
823 m_numPassedVarArgs++;
826 Node* addCallWithoutSettingResult(
827 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
830 addVarArgChild(callee);
831 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
833 if (parameterSlots > m_parameterSlots)
834 m_parameterSlots = parameterSlots;
836 for (int i = 0; i < argCount; ++i)
837 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
839 return addToGraph(Node::VarArg, op, opInfo, prediction);
843 int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
844 SpeculatedType prediction)
846 if (op == TailCall) {
847 if (allInlineFramesAreTailCalls())
848 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
849 op = TailCallInlinedCaller;
853 Node* call = addCallWithoutSettingResult(
854 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
855 VirtualRegister resultReg(result);
856 if (resultReg.isValid())
857 set(resultReg, call);
861 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
863 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
864 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
// object's structure as soon as we make it a weakJSConstant.
866 Node* objectNode = weakJSConstant(object);
867 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
871 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
873 SpeculatedType prediction;
875 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
876 prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
879 if (prediction != SpecNone)
882 // If we have no information about the values this
883 // node generates, we check if by any chance it is
884 // a tail call opcode. In that case, we walk up the
885 // inline frames to find a call higher in the call
886 // chain and use its prediction. If we only have
887 // inlined tail call frames, we use SpecFullTop
888 // to avoid a spurious OSR exit.
889 Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex;
890 OpcodeID opcodeID = Interpreter::getOpcodeID(instruction->u.opcode);
894 case op_tail_call_varargs:
895 case op_tail_call_forward_arguments: {
896 // Things should be more permissive to us returning BOTTOM instead of TOP here.
897 // Currently, this will cause us to Force OSR exit. This is bad because returning
898 // TOP will cause anything that transitively touches this speculated type to
899 // also become TOP during prediction propagation.
900 // https://bugs.webkit.org/show_bug.cgi?id=164337
901 if (!inlineCallFrame())
904 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
908 InlineStackEntry* stack = m_inlineStackTop;
909 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
910 stack = stack->m_caller;
912 bytecodeIndex = codeOrigin->bytecodeIndex;
913 CodeBlock* profiledBlock = stack->m_profiledBlock;
914 ConcurrentJSLocker locker(profiledBlock->m_lock);
915 return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
922 RELEASE_ASSERT_NOT_REACHED();
926 SpeculatedType getPrediction(unsigned bytecodeIndex)
928 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
930 if (prediction == SpecNone) {
931 // We have no information about what values this node generates. Give up
932 // on executing this code, since we're likely to do more damage than good.
933 addToGraph(ForceOSRExit);
939 SpeculatedType getPredictionWithoutOSRExit()
941 return getPredictionWithoutOSRExit(m_currentIndex);
944 SpeculatedType getPrediction()
946 return getPrediction(m_currentIndex);
949 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
951 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
952 profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
953 bool makeSafe = profile->outOfBounds(locker);
954 return ArrayMode::fromObserved(locker, profile, action, makeSafe);
957 ArrayMode getArrayMode(ArrayProfile* profile)
959 return getArrayMode(profile, Array::Read);
962 Node* makeSafe(Node* node)
964 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
965 node->mergeFlags(NodeMayOverflowInt32InDFG);
966 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
967 node->mergeFlags(NodeMayNegZeroInDFG);
969 if (!isX86() && node->op() == ArithMod)
973 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
975 switch (node->op()) {
979 if (arithProfile->didObserveDouble())
980 node->mergeFlags(NodeMayHaveDoubleResult);
981 if (arithProfile->didObserveNonNumber())
982 node->mergeFlags(NodeMayHaveNonNumberResult);
986 if (arithProfile->didObserveInt52Overflow())
987 node->mergeFlags(NodeMayOverflowInt52);
988 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
989 node->mergeFlags(NodeMayOverflowInt32InBaseline);
990 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
991 node->mergeFlags(NodeMayNegZeroInBaseline);
992 if (arithProfile->didObserveDouble())
993 node->mergeFlags(NodeMayHaveDoubleResult);
994 if (arithProfile->didObserveNonNumber())
995 node->mergeFlags(NodeMayHaveNonNumberResult);
999 ASSERT_WITH_MESSAGE(!arithProfile->didObserveNonNumber(), "op_negate starts with a toNumber() on the argument, it should only produce numbers.");
1001 if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
1002 node->mergeFlags(NodeMayHaveDoubleResult);
1003 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1004 node->mergeFlags(NodeMayNegZeroInBaseline);
1005 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1006 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1016 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
1017 switch (node->op()) {
1018 case UInt32ToNumber:
1022 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1023 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1034 Node* makeDivSafe(Node* node)
1036 ASSERT(node->op() == ArithDiv);
1038 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1039 node->mergeFlags(NodeMayOverflowInt32InDFG);
1040 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1041 node->mergeFlags(NodeMayNegZeroInDFG);
1043 // The main slow case counter for op_div in the old JIT counts only when
1044 // the operands are not numbers. We don't care about that since we already
1045 // have speculations in place that take care of that separately. We only
1046 // care about when the outcome of the division is not an integer, which
1047 // is what the special fast case counter tells us.
1049 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1052 // FIXME: It might be possible to make this more granular.
1053 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
1058 void noticeArgumentsUse()
1060 // All of the arguments in this function need to be formatted as JSValues because we will
// load from them in a random-access fashion and we don't want to have to switch on
// the format.
1064 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1065 argument->mergeShouldNeverUnbox(true);
1068 bool needsDynamicLookup(ResolveType, OpcodeID);
1071 CodeBlock* m_codeBlock;
1072 CodeBlock* m_profiledBlock;
1075 // The current block being generated.
1076 BasicBlock* m_currentBlock;
1077 // The bytecode index of the current instruction being generated.
1078 unsigned m_currentIndex;
1079 // The semantic origin of the current node if different from the current Index.
1080 CodeOrigin m_currentSemanticOrigin;
1081 // True if it's OK to OSR exit right now.
1082 bool m_exitOK { false };
1084 FrozenValue* m_constantUndefined;
1085 FrozenValue* m_constantNull;
1086 FrozenValue* m_constantNaN;
1087 FrozenValue* m_constantOne;
1088 Vector<Node*, 16> m_constants;
1090 // The number of arguments passed to the function.
1091 unsigned m_numArguments;
1092 // The number of locals (vars + temporaries) used in the function.
1093 unsigned m_numLocals;
1094 // The number of slots (in units of sizeof(Register)) that we need to
1095 // preallocate for arguments to outgoing calls from this frame. This
1096 // number includes the CallFrame slots that we initialize for the callee
1097 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1098 // This number is 0 if and only if this function is a leaf.
1099 unsigned m_parameterSlots;
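// See addCallWithoutSettingResult() below, which raises m_parameterSlots via
// Graph::parameterSlotsForArgCount() for every call it emits.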
1100 // The number of var args passed to the next var arg node.
1101 unsigned m_numPassedVarArgs;
1103 HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
1105 struct InlineStackEntry {
1106 ByteCodeParser* m_byteCodeParser;
1108 CodeBlock* m_codeBlock;
1109 CodeBlock* m_profiledBlock;
1110 InlineCallFrame* m_inlineCallFrame;
1112 ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
1114 QueryableExitProfile m_exitProfile;
1116 // Remapping of identifier and constant numbers from the code block being
1117 // inlined (inline callee) to the code block that we're inlining into
// (the machine code block, which is the transitive, though not necessarily
// direct, caller).
1120 Vector<unsigned> m_identifierRemap;
1121 Vector<unsigned> m_constantBufferRemap;
1122 Vector<unsigned> m_switchRemap;
1124 // These are blocks whose terminal is a Jump, Branch or Switch, and whose target has not yet been linked.
1125 // Their terminal instead refers to a bytecode index, and the right BB can be found in m_blockLinkingTargets.
1126 Vector<BasicBlock*> m_unlinkedBlocks;
1128 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1129 // cannot have two blocks that have the same bytecodeBegin.
1130 Vector<BasicBlock*> m_blockLinkingTargets;
1132 // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
1133 BasicBlock* m_continuationBlock;
1135 VirtualRegister m_returnValue;
1137 // Speculations about variable types collected from the profiled code block,
1138 // which are based on OSR exit profiles that past DFG compilations of this
1139 // code block had gathered.
1140 LazyOperandValueProfileParser m_lazyOperands;
1142 CallLinkInfoMap m_callLinkInfos;
1143 StubInfoMap m_stubInfos;
1144 ByValInfoMap m_byValInfos;
1146 // Pointers to the argument position trackers for this slice of code.
1147 Vector<ArgumentPosition*> m_argumentPositions;
1149 InlineStackEntry* m_caller;
1154 CodeBlock* profiledBlock,
1155 JSFunction* callee, // Null if this is a closure call.
1156 VirtualRegister returnValueVR,
1157 VirtualRegister inlineCallFrameStart,
1158 int argumentCountIncludingThis,
1159 InlineCallFrame::Kind,
1160 BasicBlock* continuationBlock);
1164 m_byteCodeParser->m_inlineStackTop = m_caller;
1167 VirtualRegister remapOperand(VirtualRegister operand) const
1169 if (!m_inlineCallFrame)
1172 ASSERT(!operand.isConstant());
1174 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1178 InlineStackEntry* m_inlineStackTop;
1180 struct DelayedSetLocal {
1181 CodeOrigin m_origin;
1182 VirtualRegister m_operand;
1186 DelayedSetLocal() { }
1187 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value, SetMode setMode)
1189 , m_operand(operand)
1191 , m_setMode(setMode)
1193 RELEASE_ASSERT(operand.isValid());
1196 Node* execute(ByteCodeParser* parser)
1198 if (m_operand.isArgument())
1199 return parser->setArgument(m_origin, m_operand, m_value, m_setMode);
1200 return parser->setLocal(m_origin, m_operand, m_value, m_setMode);
1204 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1206 CodeBlock* m_dfgCodeBlock;
1207 CallLinkStatus::ContextMap m_callContextMap;
1208 StubInfoMap m_dfgStubInfos;
1210 Instruction* m_currentInstruction;
1211 bool m_hasDebuggerEnabled;
1214 BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
1216 ASSERT(bytecodeIndex != UINT_MAX);
1217 Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
1218 BasicBlock* blockPtr = block.ptr();
1219 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1220 if (m_inlineStackTop->m_blockLinkingTargets.size())
1221 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1222 m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
1223 m_graph.appendBlock(WTFMove(block));
1227 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
1229 Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
1230 BasicBlock* blockPtr = block.ptr();
1231 m_graph.appendBlock(WTFMove(block));
1235 void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
1237 ASSERT(block->bytecodeBegin == UINT_MAX);
1238 block->bytecodeBegin = bytecodeIndex;
1239 // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
1240 if (m_inlineStackTop->m_blockLinkingTargets.size())
1241 ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
1242 m_inlineStackTop->m_blockLinkingTargets.append(block);
1245 void ByteCodeParser::addJumpTo(BasicBlock* block)
1247 ASSERT(!m_currentBlock->terminal());
1248 Node* jumpNode = addToGraph(Jump);
1249 jumpNode->targetBlock() = block;
1250 m_currentBlock->didLink();
1253 void ByteCodeParser::addJumpTo(unsigned bytecodeIndex)
1255 ASSERT(!m_currentBlock->terminal());
1256 addToGraph(Jump, OpInfo(bytecodeIndex));
1257 m_inlineStackTop->m_unlinkedBlocks.append(m_currentBlock);
1260 ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode)
1262 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct),
1263 "op_call, op_tail_call and op_construct should always have the same length");
1264 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call),
1265 "op_call, op_tail_call and op_construct should always have the same length");
1267 int result = pc[1].u.operand;
1268 Node* callTarget = get(VirtualRegister(pc[2].u.operand));
1269 int argumentCountIncludingThis = pc[3].u.operand;
1270 int registerOffset = -pc[4].u.operand;
1272 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1273 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1274 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1276 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode);
1278 return handleCall(result, op, kind, OPCODE_LENGTH(op_call), callTarget,
1279 argumentCountIncludingThis, registerOffset, callLinkStatus, getPrediction());
1282 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1284 if (callTarget->isCellConstant())
1285 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1288 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1289 int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1290 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1291 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1293 ASSERT(registerOffset <= 0);
1295 refineStatically(callLinkStatus, callTarget);
1297 VERBOSE_LOG(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1299 // We first check that we have profiling information about this call, and that it did not behave too polymorphically.
1300 if (callLinkStatus.canOptimize()) {
1301 VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
1303 if (op == TailCall && handleRecursiveTailCall(callTarget, callLinkStatus, registerOffset, thisArgument, argumentCountIncludingThis))
1306 // Inlining is quite complex, and managed by a pipeline of functions:
1307 // handle(Varargs)Call -> handleInlining -> attemptToInlineCall -> inlineCall
1308 // - handleCall and handleVarargsCall deal with the case where no inlining happens, and do some sanity checks on their arguments
1309 // - handleInlining checks whether the call is polymorphic, and if so is responsible for inserting a switch on the callee
1310 // - attemptToInlineCall deals with special cases such as intrinsics, it also checks the inlining heuristic (through inliningCost)
1311 // - inlineCall finally does the actual inlining, after a complicated procedure to setup the stack correctly
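// A rough sketch of the successful path for a monomorphic, inlineable callee:
//     handleCall(...)                 // profiling reports a single, optimizable callee
//       -> handleInlining(...)        // one variant, so no switch on the callee is needed
//         -> attemptToInlineCall(...) // not an intrinsic; inliningCost() fits the budget
//           -> inlineCall(...)        // parses the callee's bytecode into this graph
// If any stage declines, we fall through to addCall() below and plant a real call node.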
1312 unsigned nextOffset = m_currentIndex + instructionSize;
1313 if (handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument, VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1314 if (UNLIKELY(m_graph.compilation()))
1315 m_graph.compilation()->noticeInlinedCall();
1320 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1321 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1322 return callNode->op() == TailCall ? Terminal : NonTerminal;
1325 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode)
1327 static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs),
1328 "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length");
1329 static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs),
1330 "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length");
1332 int result = pc[1].u.operand;
1333 int callee = pc[2].u.operand;
1334 int thisReg = pc[3].u.operand;
1335 int arguments = pc[4].u.operand;
1336 int firstFreeReg = pc[5].u.operand;
1337 int firstVarArgOffset = pc[6].u.operand;
1339 SpeculatedType prediction = getPrediction();
1341 Node* callTarget = get(VirtualRegister(callee));
1343 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1344 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1345 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1346 refineStatically(callLinkStatus, callTarget);
1348 VERBOSE_LOG(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1350 if (callLinkStatus.canOptimize()
1351 && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(callMode), prediction)) {
1352 if (UNLIKELY(m_graph.compilation()))
1353 m_graph.compilation()->noticeInlinedCall();
1357 CallVarargsData* data = m_graph.m_callVarargsData.add();
1358 data->firstVarArgOffset = firstVarArgOffset;
1360 Node* thisChild = get(VirtualRegister(thisReg));
1361 Node* argumentsChild = nullptr;
1362 if (op != TailCallForwardVarargs)
1363 argumentsChild = get(VirtualRegister(arguments));
1365 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1366 if (allInlineFramesAreTailCalls()) {
1367 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1370 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1373 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1374 VirtualRegister resultReg(result);
1375 if (resultReg.isValid())
1376 set(resultReg, call);
1380 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1383 if (thisArgumentReg.isValid())
1384 thisArgument = get(thisArgumentReg);
1386 thisArgument = nullptr;
1389 Node* callTargetForCheck;
1390 if (callee.isClosureCall()) {
1391 calleeCell = callee.executable();
1392 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1394 calleeCell = callee.nonExecutableCallee();
1395 callTargetForCheck = callTarget;
1399 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
1401 addToGraph(Phantom, thisArgument);
1404 Node* ByteCodeParser::getArgumentCount()
1406 Node* argumentCount;
1407 if (m_inlineStackTop->m_inlineCallFrame) {
1408 if (m_inlineStackTop->m_inlineCallFrame->isVarargs())
1409 argumentCount = get(VirtualRegister(CallFrameSlot::argumentCount));
1411 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->argumentCountIncludingThis))->value());
1413 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(0), OpInfo(SpecInt32Only));
1414 return argumentCount;
1417 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1419 for (int i = 0; i < argumentCountIncludingThis; ++i)
1420 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1423 bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, const CallLinkStatus& callLinkStatus, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis)
1425 if (UNLIKELY(!Options::optimizeRecursiveTailCalls()))
1428 // FIXME: We currently only do this optimisation in the simple, non-polymorphic case.
1429 // https://bugs.webkit.org/show_bug.cgi?id=178390
1430 if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1)
1433 auto targetExecutable = callLinkStatus[0].executable();
1434 InlineStackEntry* stackEntry = m_inlineStackTop;
1436 if (targetExecutable != stackEntry->executable())
1438 VERBOSE_LOG(" We found a recursive tail call, trying to optimize it into a jump.\n");
1440 if (auto* callFrame = stackEntry->m_inlineCallFrame) {
1441 // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
1442 // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
1443 if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
1446 // We are in the machine code entry (i.e. the original caller).
1447 // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
1448 if (argumentCountIncludingThis > m_codeBlock->numParameters())
1452 // We must add some check that the profiling information was correct and the target of this call is what we thought
1453 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1455 // We must set the arguments to the right values
1457 for (; argIndex < argumentCountIncludingThis; ++argIndex) {
1458 Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
1459 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
1461 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1462 for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
1463 setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
1465 // We must repeat the work of op_enter here as we will jump right after it.
1466 // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
1467 for (int i = 0; i < stackEntry->m_codeBlock->m_numVars; ++i)
1468 setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
1470 // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
1471 unsigned oldIndex = m_currentIndex;
1472 auto oldStackTop = m_inlineStackTop;
1473 m_inlineStackTop = stackEntry;
1474 m_currentIndex = OPCODE_LENGTH(op_enter);
1476 processSetLocalQueue();
1477 m_currentIndex = oldIndex;
1478 m_inlineStackTop = oldStackTop;
1481 // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
1484 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), OPCODE_LENGTH(op_enter), getBytecodeBeginForBlock);
1485 RELEASE_ASSERT(entryBlockPtr);
1486 addJumpTo(*entryBlockPtr);
1488 // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
1489 } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
1491 // The tail call was not recursive
1495 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, InlineCallFrame::Kind kind)
1497 CallMode callMode = InlineCallFrame::callModeFor(kind);
1498 CodeSpecializationKind specializationKind = specializationKindFor(callMode);
1499 VERBOSE_LOG("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1501 if (m_hasDebuggerEnabled) {
1502 VERBOSE_LOG(" Failing because the debugger is in use.\n");
1506 FunctionExecutable* executable = callee.functionExecutable();
1508 VERBOSE_LOG(" Failing because there is no function executable.\n");
1512 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1513 // being an inline candidate? We might not have a code block (1) if code was thrown away,
1514 // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
1515 // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
1516 // to inline it if we had a static proof of what was being called; this might happen for example
1517 // if you call a global function, where watchpointing gives us static information. Overall,
1518 // it's a rare case because we expect that any hot callees would have already been compiled.
1519 CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1521 VERBOSE_LOG(" Failing because no code block available.\n");
1525 if (!Options::useArityFixupInlining()) {
1526 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1527 VERBOSE_LOG(" Failing because of arity mismatch.\n");
1532 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1533 codeBlock, specializationKind, callee.isClosureCall());
1534 VERBOSE_LOG(" Call mode: ", callMode, "\n");
1535 VERBOSE_LOG(" Is closure call: ", callee.isClosureCall(), "\n");
1536 VERBOSE_LOG(" Capability level: ", capabilityLevel, "\n");
1537 VERBOSE_LOG(" Might inline function: ", mightInlineFunctionFor(codeBlock, specializationKind), "\n");
1538 VERBOSE_LOG(" Might compile function: ", mightCompileFunctionFor(codeBlock, specializationKind), "\n");
1539 VERBOSE_LOG(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1540 VERBOSE_LOG(" Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
1541 if (!canInline(capabilityLevel)) {
1542 VERBOSE_LOG(" Failing because the function is not inlineable.\n");
1546 // Check if the caller is already too large. We do this check here because that's just
1547 // where we happen to also have the callee's code block, and we want that for the
1548 // purpose of unsetting SABI (the codeBlock's shouldAlwaysBeInlined bit).
1549 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1550 codeBlock->m_shouldAlwaysBeInlined = false;
1551 VERBOSE_LOG(" Failing because the caller is too large.\n");
1555 // FIXME: this should be better at predicting how much bloat we will introduce by inlining this code block.
1557 // https://bugs.webkit.org/show_bug.cgi?id=127627
1559 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1560 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1561 // haven't gotten to Baseline yet. Consider not inlining these functions.
1562 // https://bugs.webkit.org/show_bug.cgi?id=145503
1564 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1565 // too many levels? If either of these is detected, then don't inline. We adjust our
1566 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1569 unsigned recursion = 0;
1571 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1573 if (depth >= Options::maximumInliningDepth()) {
1574 VERBOSE_LOG(" Failing because depth exceeded.\n");
1578 if (entry->executable() == executable) {
1580 if (recursion >= Options::maximumInliningRecursion()) {
1581 VERBOSE_LOG(" Failing because recursion detected.\n");
1587 VERBOSE_LOG(" Inlining should be possible.\n");
1589 // It might be possible to inline.
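// The instruction count doubles as the inlining cost that attemptToInlineCall() weighs against
// (and deducts from) the remaining inliningBalance.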
1590 return codeBlock->instructionCount();
1593 template<typename ChecksFunctor>
1594 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1596 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1598 ASSERT(inliningCost(callee, argumentCountIncludingThis, kind) != UINT_MAX);
1600 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1601 insertChecks(codeBlock);
1603 // FIXME: Don't flush constants!
1605 // arityFixupCount and numberOfStackPaddingSlots are different. arityFixupCount does not take stack alignment into account,
1606 // while numberOfStackPaddingSlots does. Consider the following case:
1608 // before: [ ... ][arg0][header]
1609 // after: [ ... ][ext ][arg1][arg0][header]
1611 // In the above case, arityFixupCount is 1, but numberOfStackPaddingSlots is 2 because the stack needs to be aligned.
1612 // We insert the extra slots to keep the stack aligned.
1613 int arityFixupCount = std::max<int>(codeBlock->numParameters() - argumentCountIncludingThis, 0);
1614 int numberOfStackPaddingSlots = CommonSlowPaths::numberOfStackPaddingSlots(codeBlock, argumentCountIncludingThis);
1615 ASSERT(!(numberOfStackPaddingSlots % stackAlignmentRegisters()));
1616 int registerOffsetAfterFixup = registerOffset - numberOfStackPaddingSlots;
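// Subtracting the padding slots moves the inlined frame further down the stack, reserving room for the
// aligned argument area shown in the diagram above.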
1618 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffsetAfterFixup)).offset() + CallFrame::headerSizeInRegisters;
1621 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1622 CallFrame::headerSizeInRegisters + codeBlock->m_numCalleeLocals);
1624 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1626 VirtualRegister resultReg(resultOperand);
1627 if (resultReg.isValid())
1628 resultReg = m_inlineStackTop->remapOperand(resultReg);
1630 VariableAccessData* calleeVariable = nullptr;
1631 if (callee.isClosureCall()) {
1632 Node* calleeSet = set(
1633 VirtualRegister(registerOffsetAfterFixup + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1635 calleeVariable = calleeSet->variableAccessData();
1636 calleeVariable->mergeShouldNeverUnbox(true);
1639 if (arityFixupCount) {
1640 // Note: we do arity fixup in two phases:
1641 // 1. We get all the values we need and MovHint them to the expected locals.
1642 // 2. We SetLocal them inside the callee's CodeOrigin. This way, if we exit, the callee's
1643 // frame is already set up. If any SetLocal exits, we have a valid exit state.
1644 // This is required because if we didn't do this in two phases, we may exit in
1645 // the middle of arity fixup from the caller's CodeOrigin. This is unsound because if
1646 // we did the SetLocals in the caller's frame, the memcpy may clobber needed parts
1647 // of the frame right before exiting. For example, consider if we need to pad two args:
1648 // [arg3][arg2][arg1][arg0]
1649 // [fix ][fix ][arg3][arg2][arg1][arg0]
1650 // We memcpy starting from arg0 in the direction of arg3. If we were to exit at a type check
1651 // for arg3's SetLocal in the caller's CodeOrigin, we'd exit with a frame like so:
1652 // [arg3][arg2][arg1][arg2][arg1][arg0]
1653 // And the caller would then just end up thinking its arguments are:
1654 // [arg3][arg2][arg1][arg2]
1655 // which is incorrect.
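// For example (hypothetical call): inlining function f(a, b, c, d) at a call site f(1, 2) pads c and d
// with undefined using the two-phase MovHint/SetLocal scheme described above.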
1657 Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
1658 // The stack needs to be aligned due to the JS calling convention. Thus, we have a hole if the argument count is not aligned.
1659 // We call this hole an "extra slot". Consider the following case, where the number of arguments is 2. If this argument
1660 // count does not fulfill the stack alignment requirement, we have already inserted extra slots.
1662 // before: [ ... ][ext ][arg1][arg0][header]
1664 // In the above case, one extra slot is inserted. If the code's parameter count is 3, we will fix up the arguments.
1665 // At that time, we can simply reuse these extra slots, so the fixed-up stack looks like the following.
1667 // before: [ ... ][ext ][arg1][arg0][header]
1668 // after: [ ... ][arg2][arg1][arg0][header]
1670 // In such cases, we do not need to move frames.
1671 if (registerOffsetAfterFixup != registerOffset) {
1672 for (int index = 0; index < argumentCountIncludingThis; ++index) {
1673 Node* value = get(virtualRegisterForArgument(index, registerOffset));
1674 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(index, registerOffsetAfterFixup));
1675 addToGraph(MovHint, OpInfo(argumentToSet.offset()), value);
1676 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, value, ImmediateNakedSet });
1679 for (int index = 0; index < arityFixupCount; ++index) {
1680 VirtualRegister argumentToSet = m_inlineStackTop->remapOperand(virtualRegisterForArgument(argumentCountIncludingThis + index, registerOffsetAfterFixup));
1681 addToGraph(MovHint, OpInfo(argumentToSet.offset()), undefined);
1682 m_setLocalQueue.append(DelayedSetLocal { currentCodeOrigin(), argumentToSet, undefined, ImmediateNakedSet });
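// Each missing parameter gets a MovHint plus a delayed SetLocal of undefined, mirroring what runtime
// arity fixup would have produced.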
1685 // At this point, it's OK to OSR exit because we finished setting up
1686 // our callee's frame. We emit an ExitOK below from the callee's CodeOrigin.
1689 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), resultReg,
1690 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock);
1692 // This is where the actual inlining really happens.
1693 unsigned oldIndex = m_currentIndex;
1696 // At this point, it's again OK to OSR exit.
1700 processSetLocalQueue();
1702 InlineVariableData inlineVariableData;
1703 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1704 inlineVariableData.argumentPositionStart = argumentPositionStart;
1705 inlineVariableData.calleeVariable = 0;
1708 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1709 == callee.isClosureCall());
1710 if (callee.isClosureCall()) {
1711 RELEASE_ASSERT(calleeVariable);
1712 inlineVariableData.calleeVariable = calleeVariable;
1715 m_graph.m_inlineVariableData.append(inlineVariableData);
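// inlineVariableData ties this inline frame to its argument-position range and, for closure calls, its
// callee variable, so later phases can associate them with the frame.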
1718 clearCaches(); // Reset our state now that we're back to the outer code.
1720 m_currentIndex = oldIndex;
1723 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1725 // If the callee returned at least once, it prepared a continuation block for us.
1726 if (inlineStackEntry.m_continuationBlock)
1727 m_currentBlock = inlineStackEntry.m_continuationBlock;
1729 // We are in the case where the callee never returns (for example it loops forever).
1730 // In that case, all blocks should end in a terminal.
1731 ASSERT(m_graph.lastBlock()->terminal());
1732 // We then allocate a new block to continue in.
1733 m_currentBlock = allocateTargetableBlock(nextOffset);
1735 ASSERT(m_currentBlock);
1736 ASSERT(!m_currentBlock->terminal());
1738 prepareToParseBlock();
1741 template<typename ChecksFunctor>
1742 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)
1744 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1746 if (!inliningBalance)
1749 VERBOSE_LOG(" Considering callee ", callee, "\n");
1751 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1752 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1753 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1754 // and there are no callsite value profiles and native functions won't have callee value profiles for
1755 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1756 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1757 // calling LoadVarargs twice.
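// For example (hypothetical call): f(...args) compiles to op_call_varargs, so when f is an intrinsic we
// skip the special-casing below and fall through to ordinary inlining.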
1758 if (!InlineCallFrame::isVarargs(kind)) {
1760 bool didInsertChecks = false;
1761 auto insertChecksWithAccounting = [&] () {
1762 insertChecks(nullptr);
1763 didInsertChecks = true;
1766 auto endSpecialCase = [&] () {
1767 RELEASE_ASSERT(didInsertChecks);
1768 addToGraph(Phantom, callTargetNode);
1769 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
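// The Phantom and argument phantoms keep the call target and arguments alive for OSR exit even though
// the special-cased handler has already consumed them.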
1771 if (continuationBlock) {
1772 m_currentIndex = nextOffset;
1774 processSetLocalQueue();
1775 addJumpTo(continuationBlock);
1779 if (InternalFunction* function = callee.internalFunction()) {
1780 if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1784 RELEASE_ASSERT(!didInsertChecks);
1788 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1789 if (intrinsic != NoIntrinsic) {
1790 if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1795 RELEASE_ASSERT(!didInsertChecks);
1796 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1799 if (Options::useDOMJIT()) {
1800 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1801 if (handleDOMJITCall(callTargetNode, resultOperand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1805 RELEASE_ASSERT(!didInsertChecks);
1810 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, kind);
1811 if (myInliningCost > inliningBalance)
1814 Instruction* savedCurrentInstruction = m_currentInstruction;
1815 inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, continuationBlock, insertChecks);
1816 inliningBalance -= myInliningCost;
1817 m_currentInstruction = savedCurrentInstruction;
1821 bool ByteCodeParser::handleInlining(
1822 Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
1823 int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
1824 VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
1825 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1827 VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");
1828 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1830 if (!callLinkStatus.size()) {
1831 VERBOSE_LOG("Bailing inlining.\n");
1835 if (InlineCallFrame::isVarargs(kind)
1836 && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1837 VERBOSE_LOG("Bailing inlining because of varargs.\n");
1841 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1842 if (specializationKind == CodeForConstruct)
1843 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1844 if (callLinkStatus.isClosureCall())
1845 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1847 // First check if we can avoid creating control flow. Our inliner does some CFG
1848 // simplification on the fly and this helps reduce compile times, but we can only leverage
1849 // this in cases where we don't need control flow diamonds to check the callee.
1850 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1853 // Only used for varargs calls.
1854 unsigned mandatoryMinimum = 0;
1855 unsigned maxNumArguments = 0;
1857 if (InlineCallFrame::isVarargs(kind)) {
1858 if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
1859 mandatoryMinimum = functionExecutable->parameterCount();
1861 mandatoryMinimum = 0;
1864 maxNumArguments = std::max(
1865 callLinkStatus.maxNumArguments(),
1866 mandatoryMinimum + 1);
1868 // We sort of pretend that this *is* the number of arguments that were passed.
1869 argumentCountIncludingThis = maxNumArguments;
1871 registerOffset = registerOffsetOrFirstFreeReg + 1;
1872 registerOffset -= maxNumArguments; // includes "this"
1873 registerOffset -= CallFrame::headerSizeInRegisters;
1874 registerOffset = -WTF::roundUpToMultipleOf(
1875     stackAlignmentRegisters(), -registerOffset);
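// Negating, rounding up, and negating again rounds the (negative) register offset away from zero,
// keeping the new frame stack-aligned.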
1878 registerOffset = registerOffsetOrFirstFreeReg;
1880 bool result = attemptToInlineCall(
1881 callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1882 argumentCountIncludingThis, nextOffset, kind, prediction,
1883 inliningBalance, nullptr, [&] (CodeBlock* codeBlock) {
1884 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1886 // If we have a varargs call, we want to extract the arguments right now.
1887 if (InlineCallFrame::isVarargs(kind)) {
1888 int remappedRegisterOffset =
1889 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1891 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1893 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1894 int remappedArgumentStart =
1895 m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1897 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1898 data->start = VirtualRegister(remappedArgumentStart + 1);
1899 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1900 data->offset = argumentsOffset;
1901 data->limit = maxNumArguments;
1902 data->mandatoryMinimum = mandatoryMinimum;
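// start and count are remapped stack slots (where the copied arguments and the argument count go);
// limit and mandatoryMinimum bound how many arguments LoadVarargs may copy and how many it must
// provide (padding with undefined as needed).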
1904 if (callOp == TailCallForwardVarargs)
1905 addToGraph(ForwardVarargs, OpInfo(data));
1907 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1909 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1910 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1911 // callTargetNode because the other 2 are still in use and alive at this point.
1912 addToGraph(Phantom, callTargetNode);
1914 // In DFG IR before SSA, we cannot insert control flow in between the
1915 // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
1916 // SSA. Fortunately, we also have other reasons for not inserting control flow before SSA.
1919 VariableAccessData* countVariable = newVariableAccessData(
1920 VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1921 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1922 // matter very much, since our use of a SetArgument and Flushes for this local slot is
1923 // mostly just a formality.
1924 countVariable->predict(SpecInt32Only);
1925 countVariable->mergeIsProfitableToUnbox(true);
1926 Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1927 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1929 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1930 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1931 VariableAccessData* variable = newVariableAccessData(
1932 VirtualRegister(remappedArgumentStart + argument));
1933 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1935 // For a while it had been my intention to do things like this inside the
1936 // prediction injection phase. But in this case it's really best to do it here,
1937 // because it's here that we have access to the variable access datas for the
1938 // inlining we're about to do.
1940 // Something else that's interesting here is that we'd really love to get
1941 // predictions from the arguments loaded at the callsite, rather than the
1942 // arguments received inside the callee. But that probably won't matter for most calls.
1944 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1945 ConcurrentJSLocker locker(codeBlock->m_lock);
1946 ValueProfile& profile = codeBlock->valueProfileForArgument(argument);
1947 variable->predict(profile.computeUpdatedPrediction(locker));
1950 Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1951 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1955 VERBOSE_LOG("Done inlining (simple).\nStack: ", currentCodeOrigin(), "\nResult: ", result, "\n");
1959 // We need to create some kind of switch over callee. For now we only do this if we believe that
1960 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1961 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1962 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1963 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1965 if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining()
1966 || InlineCallFrame::isVarargs(kind)) {
1967 VERBOSE_LOG("Bailing inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
1971 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1972 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1974 if (!Options::usePolymorphicCallInliningForNonStubStatus()
1975 && !callLinkStatus.isBasedOnStub()) {
1976 VERBOSE_LOG("Bailing inlining (non-stub polymorphism).\nStack: ", currentCodeOrigin(), "\n");
1980 bool allAreClosureCalls = true;
1981 bool allAreDirectCalls = true;
1982 for (unsigned i = callLinkStatus.size(); i--;) {
1983 if (callLinkStatus[i].isClosureCall())
1984 allAreDirectCalls = false;
1986 allAreClosureCalls = false;
1989 Node* thingToSwitchOn;
1990 if (allAreDirectCalls)
1991 thingToSwitchOn = callTargetNode;
1992 else if (allAreClosureCalls)
1993 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
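// Many function objects can share a single executable, so for closure calls we switch on the
// executable (via GetExecutable) rather than on the callee cell itself.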
1995 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1996 // where it would be beneficial. It might be best to handle these cases as if all calls were closure calls.
1998 // https://bugs.webkit.org/show_bug.cgi?id=136020
1999 VERBOSE_LOG("Bailing inlining (mix).\nStack: ", currentCodeOrigin(), "\n");
2003 VERBOSE_LOG("Doing hard inlining...\nStack: ", currentCodeOrigin(), "\n");
2005 int registerOffset = registerOffsetOrFirstFreeReg;
2007 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
2008 // store the callee so that it will be accessible to all of the blocks we're about to create. We
2009 // get away with doing an immediate-set here because we wouldn't have performed any side effects yet.
2011 VERBOSE_LOG("Register offset: ", registerOffset);
2012 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
2013 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
2014 VERBOSE_LOG("Callee is going to be ", calleeReg, "\n");
2015 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
2017 // It's OK to exit right now, even though we set some locals. That's because those locals are not
2022 SwitchData& data = *m_graph.m_switchData.add();
2023 data.kind = SwitchCell;
2024 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
2025 m_currentBlock->didLink();
2027 BasicBlock* continuationBlock = allocateUntargetableBlock();
2028 VERBOSE_LOG("Adding untargetable block ", RawPointer(continuationBlock), " (continuation)\n");
2030 // We may force this true if we give up on inlining any of the edges.
2031 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2033 VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");
2035 unsigned oldOffset = m_currentIndex;
2036 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2037 m_currentIndex = oldOffset;
2038 BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
2039 m_currentBlock = calleeEntryBlock;
2040 prepareToParseBlock();
2042 Node* myCallTargetNode = getDirect(calleeReg);
2044 bool inliningResult = attemptToInlineCall(
2045 myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
2046 argumentCountIncludingThis, nextOffset, kind, prediction,
2047 inliningBalance, continuationBlock, [&] (CodeBlock*) { });
2049 if (!inliningResult) {
2050 // That failed so we let the block die. Nothing interesting should have been added to
2051 // the block. We also give up on inlining any of the (less frequent) callees.
2052 ASSERT(m_graph.m_blocks.last() == m_currentBlock);
2053 m_graph.killBlockAndItsContents(m_currentBlock);
2054 m_graph.m_blocks.removeLast();
2055 VERBOSE_LOG("Inlining of a poly call failed, we will have to go through a slow path\n");
2057 // The fact that inlining failed means we need a slow path.
2058 couldTakeSlowPath = true;
2062 JSCell* thingToCaseOn;
2063 if (allAreDirectCalls)
2064 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2066 ASSERT(allAreClosureCalls);
2067 thingToCaseOn = callLinkStatus[i].executable();
2069 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), calleeEntryBlock));
2070 VERBOSE_LOG("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2074 m_currentBlock = allocateUntargetableBlock();
2075 m_currentIndex = oldOffset;
2077 data.fallThrough = BranchTarget(m_currentBlock);
2078 prepareToParseBlock();
2079 Node* myCallTargetNode = getDirect(calleeReg);
2080 if (couldTakeSlowPath) {
2082 resultOperand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2083 registerOffset, prediction);
2084 VERBOSE_LOG("We added a call in the slow path\n");
2086 addToGraph(CheckBadCell);
2087 addToGraph(Phantom, myCallTargetNode);
2088 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2090 set(VirtualRegister(resultOperand), addToGraph(BottomValue));
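// This fall-through block should be unreachable when couldTakeSlowPath is false: CheckBadCell above
// always exits if it is reached, and BottomValue just provides a placeholder result.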
2091 VERBOSE_LOG("couldTakeSlowPath was false\n");
2094 m_currentIndex = nextOffset;
2095 m_exitOK = true; // Origin changed, so it's fine to exit again.
2096 processSetLocalQueue();
2098 if (Node* terminal = m_currentBlock->terminal())
2099 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2101 addJumpTo(continuationBlock);
2104 prepareToParseBlock();
2106 m_currentIndex = oldOffset;
2107 m_currentBlock = continuationBlock;
2110 VERBOSE_LOG("Done inlining (hard).\nStack: ", currentCodeOrigin(), "\n");
2114 template<typename ChecksFunctor>
2115 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2117 ASSERT(op == ArithMin || op == ArithMax);
2119 if (argumentCountIncludingThis == 1) {
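// Math.max() with no arguments evaluates to -Infinity and Math.min() to +Infinity, so the
// zero-argument case folds to a constant.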
2121 double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2122 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result)))));
2126 if (argumentCountIncludingThis == 2) {
2128 Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2129 addToGraph(Phantom, Edge(result, NumberUse));
2130 set(VirtualRegister(resultOperand), result);
2134 if (argumentCountIncludingThis == 3) {
2136 set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2140 // Don't handle >=3 arguments for now.
2144 template<typename ChecksFunctor>
2145 bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2147 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n");
2149 // It so happens that the code below doesn't handle the invalid result case. We could fix that, but
2150 // it would only benefit intrinsics called as setters, like if you do:
2152 // o.__defineSetter__("foo", Math.pow)
2154 // Which is extremely amusing, but probably not worth optimizing.
2155 if (!VirtualRegister(resultOperand).isValid())
2158 switch (intrinsic) {
2160 // Intrinsic Functions:
2162 case AbsIntrinsic: {
2163 if (argumentCountIncludingThis == 1) { // Math.abs()
2165 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2169 if (!MacroAssembler::supportsFloatingPointAbs())
2173 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2174 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2175 node->mergeFlags(NodeMayOverflowInt32InDFG);
2176 set(VirtualRegister(resultOperand), node);
2181 return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
2184 return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
2186 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2187 case capitalizedName##Intrinsic:
2188 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2189 #undef DFG_ARITH_UNARY
2191 if (argumentCountIncludingThis == 1) {
2193 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2196 Arith::UnaryType type = Arith::UnaryType::Sin;
2197 switch (intrinsic) {
2198 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \
2199 case capitalizedName##Intrinsic: \
2200 type = Arith::UnaryType::capitalizedName; \
2202 FOR_EACH_DFG_ARITH_UNARY_OP(DFG_ARITH_UNARY)
2203 #undef DFG_ARITH_UNARY
2205 RELEASE_ASSERT_NOT_REACHED();
2208 set(VirtualRegister(resultOperand), addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));
2212 case FRoundIntrinsic:
2213 case SqrtIntrinsic: {
2214 if (argumentCountIncludingThis == 1) {
2216 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2220 NodeType nodeType = Unreachable;
2221 switch (intrinsic) {
2222 case FRoundIntrinsic:
2223 nodeType = ArithFRound;
2226 nodeType = ArithSqrt;
2229 RELEASE_ASSERT_NOT_REACHED();
2232 set(VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2236 case PowIntrinsic: {
2237 if (argumentCountIncludingThis < 3) {
2238 // Math.pow() and Math.pow(x) return NaN.
2240 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2244 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2245 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2246 set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
2250 case ArrayPushIntrinsic: {
2251 #if USE(JSVALUE32_64)
2252 if (isX86() || isMIPS()) {
2253 if (argumentCountIncludingThis > 2)
2258 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
2261 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2262 if (!arrayMode.isJSArray())
2264 switch (arrayMode.type()) {
2267 case Array::Contiguous:
2268 case Array::ArrayStorage: {
2271 addVarArgChild(nullptr); // For storage.
2272 for (int i = 0; i < argumentCountIncludingThis; ++i)
2273 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2274 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction));
2275 set(VirtualRegister(resultOperand), arrayPush);
2285 case ArraySliceIntrinsic: {
2286 #if USE(JSVALUE32_64)
2287 if (isX86() || isMIPS()) {
2288 // There aren't enough registers for this to be done easily.
2292 if (argumentCountIncludingThis < 2)
2295 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2296 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2299 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2300 if (!arrayMode.isJSArray())
2303 if (arrayMode.arrayClass() != Array::OriginalArray)
2306 switch (arrayMode.type()) {
2309 case Array::Contiguous: {
2310 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2312 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure();
2313 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure();
2315 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2316 // https://bugs.webkit.org/show_bug.cgi?id=173171
2317 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2318 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2319 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2320 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2321 && globalObject->arrayPrototypeChainIsSane()) {
2323 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2324 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2325 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2326 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2330 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2331 // We do a few things here to prove that we aren't skipping side effects in an observable way:
2332 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2333 // effects of slice require that we perform a Get(array, "constructor"), and we can skip
2334 // that if we're an original array structure). (We can relax this in the future by using
2335 // TryGetById and CheckCell.)
2337 // 2. We check that the array we're calling slice on has the same global object as the lexical
2338 // global object that this code is running in. This requirement is necessary because we set up the
2339 // watchpoints above on the lexical global object. This means that code that calls slice on
2340 // arrays produced by other global objects won't get this optimization. We could relax this
2341 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2342 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2344 // 3. By proving we're an original array structure, we guarantee that the incoming array
2345 // isn't a subclass of Array.
2347 StructureSet structureSet;
2348 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2349 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2350 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2351 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
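// Checking the array against this global object's original Int32/Contiguous/Double structures is what
// establishes the three points above.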
2353 addVarArgChild(array);
2354 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2355 if (argumentCountIncludingThis >= 3)
2356 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2357 addVarArgChild(addToGraph(GetButterfly, array));
2359 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2360 set(VirtualRegister(resultOperand), arraySlice);
2370 RELEASE_ASSERT_NOT_REACHED();
2374 case ArrayIndexOfIntrinsic: {
2375 if (argumentCountIncludingThis < 2)
2378 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)
2379 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2380 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2381 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2384 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2385 if (!arrayMode.isJSArray())
2388 if (arrayMode.arrayClass() != Array::OriginalArray)
2391 // We do not want to convert arrays into one type just to perform indexOf.
2392 if (arrayMode.doesConversion())
2395 switch (arrayMode.type()) {
2398 case Array::Contiguous: {
2399 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2401 Structure* arrayPrototypeStructure = globalObject->arrayPrototype()->structure();
2402 Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure();
2404 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2405 // https://bugs.webkit.org/show_bug.cgi?id=173171
2406 if (globalObject->havingABadTimeWatchpoint()->isStillValid()
2407 && arrayPrototypeStructure->transitionWatchpointSetIsStillValid()
2408 && objectPrototypeStructure->transitionWatchpointSetIsStillValid()
2409 && globalObject->arrayPrototypeChainIsSane()) {
2411 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2412 m_graph.registerAndWatchStructureTransition(arrayPrototypeStructure);
2413 m_graph.registerAndWatchStructureTransition(objectPrototypeStructure);
2417 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2418 addVarArgChild(array);
2419 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Search element.
2420 if (argumentCountIncludingThis >= 3)
2421 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // Start index.
2422 addVarArgChild(nullptr);
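// The trailing null child is presumably the same kind of storage slot as ArrayPush's "For storage."
// child above, left to be filled in later.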
2424 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo());
2425 set(VirtualRegister(resultOperand), node);
2435 RELEASE_ASSERT_NOT_REACHED();
2440 case ArrayPopIntrinsic: {
2441 if (argumentCountIncludingThis != 1)
2444 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2445 if (!arrayMode.isJSArray())
2447 switch (arrayMode.type()) {
2450 case Array::Contiguous:
2451 case Array::ArrayStorage: {
2453 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2454 set(VirtualRegister(resultOperand), arrayPop);
2463 case AtomicsAddIntrinsic:
2464 case AtomicsAndIntrinsic:
2465 case AtomicsCompareExchangeIntrinsic:
2466 case AtomicsExchangeIntrinsic:
2467 case AtomicsIsLockFreeIntrinsic:
2468 case AtomicsLoadIntrinsic:
2469 case AtomicsOrIntrinsic:
2470 case AtomicsStoreIntrinsic:
2471 case AtomicsSubIntrinsic:
2472 case AtomicsXorIntrinsic: {
2476 NodeType op = LastNodeType;
2477 unsigned numArgs = 0; // Number of actual args; we add one for the backing store pointer.
2478 switch (intrinsic) {
2479 case AtomicsAddIntrinsic:
2483 case AtomicsAndIntrinsic:
2487 case AtomicsCompareExchangeIntrinsic:
2488 op = AtomicsCompareExchange;
2491 case AtomicsExchangeIntrinsic:
2492 op = AtomicsExchange;
2495 case AtomicsIsLockFreeIntrinsic:
2496 // This gets no backing store, but we need no special logic for this since this also does
2497 // not need varargs.
2498 op = AtomicsIsLockFree;
2501 case AtomicsLoadIntrinsic:
2505 case AtomicsOrIntrinsic:
2509 case AtomicsStoreIntrinsic:
2513 case AtomicsSubIntrinsic:
2517 case AtomicsXorIntrinsic:
2522 RELEASE_ASSERT_NOT_REACHED();
2526 if (static_cast<unsigned>(argumentCountIncludingThis) < 1 + numArgs)
2531 Vector<Node*, 3> args;
2532 for (unsigned i = 0; i < numArgs; ++i)
2533 args.append(get(virtualRegisterForArgument(1 + i, registerOffset)));
2536 if (numArgs + 1 <= 3) {
2537 while (args.size() < 3)
2538 args.append(nullptr);
2539 result = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions).asWord()), OpInfo(prediction), args[0], args[1], args[2]);
2541 for (Node* node : args)
2542 addVarArgChild(node);
2543 addVarArgChild(nullptr);
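// The extra null child reserves the backing store pointer slot mentioned in the numArgs comment above.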
2544 result = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions).asWord()), OpInfo(prediction));
2547 set(VirtualRegister(resultOperand), result);
2551 case ParseIntIntrinsic: {
2552 if (argumentCountIncludingThis < 2)
2555 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2559 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2561 if (argumentCountIncludingThis == 2)
2562 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2564 ASSERT(argumentCountIncludingThis > 2);
2565 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2566 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2568 set(VirtualRegister(resultOperand), parseInt);
2572 case CharCodeAtIntrinsic: {
2573 if (argumentCountIncludingThis != 2)
2577 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2578 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2579 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2581 set(VirtualRegister(resultOperand), charCode);
2585 case CharAtIntrinsic: {
2586 if (argumentCountIncludingThis != 2)
2590 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2591 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2592 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2594 set(VirtualRegister(resultOperand), charCode);
2597 case Clz32Intrinsic: {
2599 if (argumentCountIncludingThis == 1)
2600 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2602 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2603 set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
2607 case FromCharCodeIntrinsic: {
2608 if (argumentCountIncludingThis != 2)
2612 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2613 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2615 set(VirtualRegister(resultOperand), charCode);
2620 case RegExpExecIntrinsic: {
2621 if (argumentCountIncludingThis != 2)
2625 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2626 set(VirtualRegister(resultOperand), regExpExec);
2631 case RegExpTestIntrinsic:
2632 case RegExpTestFastIntrinsic: {
2633 if (argumentCountIncludingThis != 2)
2636 if (intrinsic == RegExpTestIntrinsic) {
2637 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2638 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2641 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2642 Structure* regExpStructure = globalObject->regExpStructure();
2643 m_graph.registerStructure(regExpStructure);
2644 ASSERT(regExpStructure->storedPrototype().isObject());
2645 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2647 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2648 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2650 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2651 JSValue currentProperty;
2652 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2655 return currentProperty == primordialProperty;
2658 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2659 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2662 // Check that regExpObject is actually a RegExp object.
2663 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2664 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2666 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2667 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2668 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2669 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2670 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2671 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
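// If the fetched "exec" is ever a different cell than the primordial RegExp.prototype.exec frozen
// above, CheckCell will OSR exit.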
2675 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2676 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2677 set(VirtualRegister(resultOperand), regExpExec);
2682 case ObjectGetPrototypeOfIntrinsic: {
2683 if (argumentCountIncludingThis != 2)
2687 set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2691 case ReflectGetPrototypeOfIntrinsic: {
2692 if (argumentCountIncludingThis != 2)
2696 set(VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));
2700 case IsTypedArrayViewIntrinsic: {
2701 ASSERT(argumentCountIncludingThis == 2);
2704 set(VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2708 case StringPrototypeReplaceIntrinsic: {
2709 if (argumentCountIncludingThis != 3)
2712 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2713 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2716 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2717 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2720 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2721 Structure* regExpStructure = globalObject->regExpStructure();
2722 m_graph.registerStructure(regExpStructure);
2723 ASSERT(regExpStructure->storedPrototype().isObject());
2724 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2726 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2727 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2729 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2730 JSValue currentProperty;
2731 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2734 return currentProperty == primordialProperty;
2737 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2738 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2741 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2742 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2745 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2746 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2749 // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2750 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2755 Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2756 set(VirtualRegister(resultOperand), result);
2760 case StringPrototypeReplaceRegExpIntrinsic: {
2761 if (argumentCountIncludingThis != 3)
2765 Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2766 set(VirtualRegister(resultOperand), result);
2770 case RoundIntrinsic:
2771 case FloorIntrinsic:
2773 case TruncIntrinsic: {
2774 if (argumentCountIncludingThis == 1) {
2776 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2780 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2782 if (intrinsic == RoundIntrinsic)
2784 else if (intrinsic == FloorIntrinsic)
2786 else if (intrinsic == CeilIntrinsic)
2789 ASSERT(intrinsic == TruncIntrinsic);
2792 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2793 set(VirtualRegister(resultOperand), roundNode);
2796 case IMulIntrinsic: {
2797 if (argumentCountIncludingThis != 3)
2800 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2801 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2802 Node* left = get(leftOperand);
2803 Node* right = get(rightOperand);
2804 set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
2808 case RandomIntrinsic: {
2809 if (argumentCountIncludingThis != 1)
2812 set(VirtualRegister(resultOperand), addToGraph(ArithRandom));
2816 case DFGTrueIntrinsic: {
2818 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2822 case OSRExitIntrinsic: {
2824 addToGraph(ForceOSRExit);
2825 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2829 case IsFinalTierIntrinsic: {
2831 set(VirtualRegister(resultOperand),
2832 jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
2836 case SetInt32HeapPredictionIntrinsic: {
2838 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2839 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2840 if (node->hasHeapPrediction())
2841 node->setHeapPrediction(SpecInt32Only);
2843 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2847 case CheckInt32Intrinsic: {
2849 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2850 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2851 addToGraph(Phantom, Edge(node, Int32Use));
2853 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2857 case FiatInt52Intrinsic: {
2858 if (argumentCountIncludingThis != 2)
2861 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2863 set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
2865 set(VirtualRegister(resultOperand), get(operand));
2869 case JSMapGetIntrinsic: {
2870 if (argumentCountIncludingThis != 2)
2874 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2875 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2876 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2877 Node* hash = addToGraph(MapHash, normalizedKey);
2878 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash));
2879 Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2880 set(VirtualRegister(resultOperand), result);
2884 case JSSetHasIntrinsic:
2885 case JSMapHasIntrinsic: {
2886 if (argumentCountIncludingThis != 2)
2890 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2891 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2892 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2893 Node* hash = addToGraph(MapHash, normalizedKey);
2894 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2895 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(normalizedKey), Edge(hash));
2896 JSCell* sentinel = nullptr;
2897 if (intrinsic == JSMapHasIntrinsic)
2898 sentinel = m_vm->sentinelMapBucket.get();
2900 sentinel = m_vm->sentinelSetBucket.get();
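// GetMapBucket returns the sentinel bucket when the key is absent, so comparing against it and
// negating gives the boolean "has" result.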
2902 FrozenValue* frozenPointer = m_graph.freeze(sentinel);
2903 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket);
2904 Node* result = addToGraph(LogicalNot, invertedResult);
2905 set(VirtualRegister(resultOperand), result);
2909 case JSSetAddIntrinsic: {
2910 if (argumentCountIncludingThis != 2)
2914 Node* base = get(virtualRegisterForArgument(0, registerOffset));
2915 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2916 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2917 Node* hash = addToGraph(MapHash, normalizedKey);
2918 addToGraph(SetAdd, base, normalizedKey, hash);
2919 set(VirtualRegister(resultOperand), base);
2923 case JSMapSetIntrinsic: {
2924 if (argumentCountIncludingThis != 3)
2928 Node* base = get(virtualRegisterForArgument(0, registerOffset));
2929 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2930 Node* value = get(virtualRegisterForArgument(2, registerOffset));
2932 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
2933 Node* hash = addToGraph(MapHash, normalizedKey);
2935 addVarArgChild(base);
2936 addVarArgChild(normalizedKey);
2937 addVarArgChild(value);
2938 addVarArgChild(hash);
2939 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0));
2940 set(VirtualRegister(resultOperand), base);
2944 case JSSetBucketHeadIntrinsic:
2945 case JSMapBucketHeadIntrinsic: {
2946 ASSERT(argumentCountIncludingThis == 2);
2949 Node* map = get(virtualRegisterForArgument(1, registerOffset));
2950 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? SetObjectUse : MapObjectUse;
2951 Node* result = addToGraph(GetMapBucketHead, Edge(map, useKind));
2952 set(VirtualRegister(resultOperand), result);
2956 case JSSetBucketNextIntrinsic:
2957 case JSMapBucketNextIntrinsic: {
2958 ASSERT(argumentCountIncludingThis == 2);
2961 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
2962 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
2963 Node* result = addToGraph(GetMapBucketNext, OpInfo(type), bucket);
2964 set(VirtualRegister(resultOperand), result);
2968 case JSSetBucketKeyIntrinsic:
2969 case JSMapBucketKeyIntrinsic: {
2970 ASSERT(argumentCountIncludingThis == 2);
2973 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
2974 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map;
2975 Node* result = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);
2976 set(VirtualRegister(resultOperand), result);
2980 case JSMapBucketValueIntrinsic: {
2981 ASSERT(argumentCountIncludingThis == 2);
2984 Node* bucket = get(virtualRegisterForArgument(1, registerOffset));
2985 Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);
2986 set(VirtualRegister(resultOperand), result);
2990 case JSWeakMapGetIntrinsic: {
2991 if (argumentCountIncludingThis != 2)
2994 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2998 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2999 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3000 Node* normalizedKey = addToGraph(NormalizeMapKey, key);
3001 Node* hash = addToGraph(MapHash, normalizedKey);
3002 Node* result = addToGraph(WeakMapGet, OpInfo(), OpInfo(prediction), map, normalizedKey, hash);
3003 set(VirtualRegister(resultOperand), result);
3007 case HasOwnPropertyIntrinsic: {
3008 if (argumentCountIncludingThis != 2)
3011 // This can be racy, that's fine. We know that once we observe that this is created,
3012 // it will never be destroyed until the VM is destroyed. It's unlikely that
3013 // we'd ever get to the point where we inline this as an intrinsic without the
3014 // cache being created, however, it's possible if we always throw exceptions inside hasOwnProperty.
3016 if (!m_vm->hasOwnPropertyCache())
3020 Node* object = get(virtualRegisterForArgument(0, registerOffset));
3021 Node* key = get(virtualRegisterForArgument(1, registerOffset));
3022 Node* result = addToGraph(HasOwnProperty, object, key);
3023 set(VirtualRegister(resultOperand), result);
3027 case StringPrototypeSliceIntrinsic: {
3028 if (argumentCountIncludingThis < 2)
3031 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3035 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3036 Node* start = get(virtualRegisterForArgument(1, registerOffset));
3037 Node* end = nullptr;
3038 if (argumentCountIncludingThis > 2)
3039 end = get(virtualRegisterForArgument(2, registerOffset));
3040 Node* result = addToGraph(StringSlice, thisString, start, end);
3041 set(VirtualRegister(resultOperand), result);
3045 case StringPrototypeToLowerCaseIntrinsic: {
3046 if (argumentCountIncludingThis != 1)
3049 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3053 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
3054 Node* result = addToGraph(ToLowerCase, thisString);
3055 set(VirtualRegister(resultOperand), result);
3059 case NumberPrototypeToStringIntrinsic: {
3060 if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
3063 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3067 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
3068 if (argumentCountIncludingThis == 1) {
3069 Node* result = addToGraph(ToString, thisNumber);
3070 set(VirtualRegister(resultOperand), result);
3072 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
3073 Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix);
3074 set(VirtualRegister(resultOperand), result);
3079 case CPUMfenceIntrinsic:
3080 case CPURdtscIntrinsic:
3081 case CPUCpuidIntrinsic:
3082 case CPUPauseIntrinsic: {
3084 if (!isFTL(m_graph.m_plan.mode))
3087 set(VirtualRegister(resultOperand),
3088 addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo()));
3101 template<typename ChecksFunctor>
3102 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
3104 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
3106 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
3109 // FIXME: Currently, we only support functions that take up to 2 arguments.
3110 // Eventually, we should extend this. But possibly, 2 or 3 can cover typical use cases.
3111 // https://bugs.webkit.org/show_bug.cgi?id=164346
3112 ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary length arguments.");
3115 addCall(resultOperand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
3120 template<typename ChecksFunctor>
3121 bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
3123 switch (variant.intrinsic()) {
3124 case TypedArrayByteLengthIntrinsic: {
3127 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
3128 Array::Type arrayType = toArrayType(type);
3129 size_t logSize = logElementSize(type);
3131 variant.structureSet().forEach([&] (Structure* structure) {
3132 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
3133 ASSERT(logSize == logElementSize(curType));
3134 arrayType = refineTypedArrayType(arrayType, curType);
3135 ASSERT(arrayType != Array::Generic);
3138 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode);
3141 set(VirtualRegister(resultOperand), lengthNode);