2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGByteCodeParser.h"
31 #include "ArithProfile.h"
32 #include "ArrayConstructor.h"
33 #include "BasicBlockLocation.h"
34 #include "CallLinkStatus.h"
35 #include "CodeBlock.h"
36 #include "CodeBlockWithJITType.h"
37 #include "DFGAbstractHeap.h"
38 #include "DFGArrayMode.h"
39 #include "DFGCapabilities.h"
40 #include "DFGClobberize.h"
41 #include "DFGClobbersExitState.h"
43 #include "DFGJITCode.h"
44 #include "FunctionCodeBlock.h"
45 #include "GetByIdStatus.h"
47 #include "JSCInlines.h"
48 #include "JSModuleEnvironment.h"
49 #include "JSModuleNamespaceObject.h"
50 #include "NumberConstructor.h"
51 #include "ObjectConstructor.h"
52 #include "PreciseJumpTargets.h"
53 #include "PutByIdFlags.h"
54 #include "PutByIdStatus.h"
55 #include "RegExpPrototype.h"
56 #include "StackAlignment.h"
57 #include "StringConstructor.h"
58 #include "StructureStubInfo.h"
60 #include <wtf/CommaPrinter.h>
61 #include <wtf/HashMap.h>
62 #include <wtf/MathExtras.h>
63 #include <wtf/StdLibExtras.h>
65 namespace JSC { namespace DFG {
67 static const bool verbose = false;
69 class ConstantBufferKey {
77 ConstantBufferKey(WTF::HashTableDeletedValueType)
83 ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
84 : m_codeBlock(codeBlock)
89 bool operator==(const ConstantBufferKey& other) const
91 return m_codeBlock == other.m_codeBlock
92 && m_index == other.m_index;
97 return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
100 bool isHashTableDeletedValue() const
102 return !m_codeBlock && m_index;
105 CodeBlock* codeBlock() const { return m_codeBlock; }
106 unsigned index() const { return m_index; }
109 CodeBlock* m_codeBlock;
113 struct ConstantBufferKeyHash {
114 static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
115 static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
120 static const bool safeToCompareToEmptyOrDeleted = true;
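// Note (reading of the code above): the empty key is (null codeBlock, index 0) while the
// hash-table deleted key is (null codeBlock, non-zero index) -- see isHashTableDeletedValue().
// Both sentinels are distinguishable by comparing plain members, which is why
// safeToCompareToEmptyOrDeleted can be true here.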
123 } } // namespace JSC::DFG
127 template<typename T> struct DefaultHash;
128 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
129 typedef JSC::DFG::ConstantBufferKeyHash Hash;
132 template<typename T> struct HashTraits;
133 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
137 namespace JSC { namespace DFG {
139 // === ByteCodeParser ===
141 // This class is used to compile the dataflow graph from a CodeBlock.
142 class ByteCodeParser {
144 ByteCodeParser(Graph& graph)
146 , m_codeBlock(graph.m_codeBlock)
147 , m_profiledBlock(graph.m_profiledBlock)
151 , m_constantUndefined(graph.freeze(jsUndefined()))
152 , m_constantNull(graph.freeze(jsNull()))
153 , m_constantNaN(graph.freeze(jsNumber(PNaN)))
154 , m_constantOne(graph.freeze(jsNumber(1)))
155 , m_numArguments(m_codeBlock->numParameters())
156 , m_numLocals(m_codeBlock->m_numCalleeLocals)
157 , m_parameterSlots(0)
158 , m_numPassedVarArgs(0)
159 , m_inlineStackTop(0)
160 , m_currentInstruction(0)
161 , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
163 ASSERT(m_profiledBlock);
166 // Parse a full CodeBlock of bytecode.
170 struct InlineStackEntry;
172 // Just parse from m_currentIndex to the end of the current CodeBlock.
173 void parseCodeBlock();
175 void ensureLocals(unsigned newNumLocals)
177 if (newNumLocals <= m_numLocals)
179 m_numLocals = newNumLocals;
180 for (size_t i = 0; i < m_graph.numBlocks(); ++i)
181 m_graph.block(i)->ensureLocals(newNumLocals);
184 // Helper for min and max.
185 template<typename ChecksFunctor>
186 bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
188 void refineStatically(CallLinkStatus&, Node* callTarget);
189 // Handle calls. This resolves issues surrounding inlining and intrinsics.
190 enum Terminality { Terminal, NonTerminal };
191 Terminality handleCall(
192 int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
193 Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
194 SpeculatedType prediction);
195 Terminality handleCall(
196 int result, NodeType op, CallMode, unsigned instructionSize,
197 Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
198 Terminality handleCall(int result, NodeType op, CallMode, unsigned instructionSize, int callee, int argCount, int registerOffset);
199 Terminality handleCall(Instruction* pc, NodeType op, CallMode);
200 Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode);
201 void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
202 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
203 Node* getArgumentCount();
204 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CallMode); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
205 // Handle inlining. Return true if it succeeded, false if we need to plant a call.
206 bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
207 enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
208 template<typename ChecksFunctor>
209 bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
210 template<typename ChecksFunctor>
211 void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
212 void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
213 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
214 template<typename ChecksFunctor>
215 bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
216 template<typename ChecksFunctor>
217 bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
218 template<typename ChecksFunctor>
219 bool handleIntrinsicGetter(int resultOperand, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);
220 template<typename ChecksFunctor>
221 bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
222 template<typename ChecksFunctor>
223 bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);
224 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value);
225 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset);
226 bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction);
227 bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus);
229 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not
230 // check the validity of the condition, but it may return a null one if it encounters a contradiction.
231 ObjectPropertyCondition presenceLike(
232 JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
234 // Attempt to watch the presence of a property. It will watch that the property is present in the same
235 // way as in all of the structures in the set. It may emit code instead of just setting a watchpoint.
236 // Returns true if this all works out.
237 bool checkPresenceLike(JSObject* knownBase, UniquedStringImpl*, PropertyOffset, const StructureSet&);
238 void checkPresenceLike(Node* base, UniquedStringImpl*, PropertyOffset, const StructureSet&);
240 // Works with both GetByIdVariant and the setter form of PutByIdVariant.
241 template<typename VariantType>
242 Node* load(SpeculatedType, Node* base, unsigned identifierNumber, const VariantType&);
244 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value);
247 int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize);
249 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
251 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
254 // Either register a watchpoint or emit a check for this condition. Returns false if the
255 // condition no longer holds, and therefore no reasonable check can be emitted.
256 bool check(const ObjectPropertyCondition&);
258 GetByOffsetMethod promoteToConstant(GetByOffsetMethod);
260 // Either register a watchpoint or emit a check for this condition. It must be a Presence
261 // condition. It will attempt to promote a Presence condition to an Equivalence condition.
262 // Emits code for the loaded value that the condition guards, and returns a node containing
263 // the loaded value. Returns null if the condition no longer holds.
264 GetByOffsetMethod planLoad(const ObjectPropertyCondition&);
265 Node* load(SpeculatedType, unsigned identifierNumber, const GetByOffsetMethod&, NodeType = GetByOffset);
266 Node* load(SpeculatedType, const ObjectPropertyCondition&, NodeType = GetByOffset);
268 // Calls check() for each condition in the set: that is, it either emits checks or registers
269 // watchpoints (or a combination of the two) to make the conditions hold. If any of those
270 // conditions are no longer checkable, returns false.
271 bool check(const ObjectPropertyConditionSet&);
273 // Calls check() for those conditions that aren't the slot base, and calls load() for the slot
274 // base. Does a combination of watchpoint registration and check emission to guard the
275 // conditions, and emits code to load the value from the slot base. Returns a node containing
276 // the loaded value. Returns null if any of the conditions were no longer checkable.
277 GetByOffsetMethod planLoad(const ObjectPropertyConditionSet&);
278 Node* load(SpeculatedType, const ObjectPropertyConditionSet&, NodeType = GetByOffset);
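// In other words, the ObjectPropertyConditionSet overloads are intended to compose the
// single-condition primitives above: check() every condition that isn't the slot base,
// planLoad() the slot-base condition into a GetByOffsetMethod, and then load() materializes
// that method as a node (GetByOffset by default). This is a summary of the comments above,
// not a guarantee about the exact control flow.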
280 void prepareToParseBlock();
283 // Parse a single basic block of bytecode instructions.
284 bool parseBlock(unsigned limit);
285 // Link block successors.
286 void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
287 void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
289 VariableAccessData* newVariableAccessData(VirtualRegister operand)
291 ASSERT(!operand.isConstant());
293 m_graph.m_variableAccessData.append(VariableAccessData(operand));
294 return &m_graph.m_variableAccessData.last();
297 // Get/Set the operands/result of a bytecode instruction.
298 Node* getDirect(VirtualRegister operand)
300 ASSERT(!operand.isConstant());
302 // Is this an argument?
303 if (operand.isArgument())
304 return getArgument(operand);
307 return getLocal(operand);
310 Node* get(VirtualRegister operand)
312 if (operand.isConstant()) {
313 unsigned constantIndex = operand.toConstantIndex();
314 unsigned oldSize = m_constants.size();
315 if (constantIndex >= oldSize || !m_constants[constantIndex]) {
316 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
317 JSValue value = codeBlock.getConstant(operand.offset());
318 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
319 if (constantIndex >= oldSize) {
320 m_constants.grow(constantIndex + 1);
321 for (unsigned i = oldSize; i < m_constants.size(); ++i)
322 m_constants[i] = nullptr;
325 Node* constantNode = nullptr;
326 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
327 constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
329 constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
330 m_constants[constantIndex] = constantNode;
332 ASSERT(m_constants[constantIndex]);
333 return m_constants[constantIndex];
336 if (inlineCallFrame()) {
337 if (!inlineCallFrame()->isClosureCall) {
338 JSFunction* callee = inlineCallFrame()->calleeConstant();
339 if (operand.offset() == CallFrameSlot::callee)
340 return weakJSConstant(callee);
342 } else if (operand.offset() == CallFrameSlot::callee) {
343 // We have to do some constant-folding here because this enables CreateThis folding. Note
344 // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
345 // case if the function is a singleton then we already know it.
346 if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(*m_vm, m_codeBlock->ownerExecutable())) {
347 InferredValue* singleton = executable->singletonFunction();
348 if (JSValue value = singleton->inferredValue()) {
349 m_graph.watchpoints().addLazily(singleton);
350 JSFunction* function = jsCast<JSFunction*>(value);
351 return weakJSConstant(function);
354 return addToGraph(GetCallee);
357 return getDirect(m_inlineStackTop->remapOperand(operand));
361 // A normal set which follows a two-phase commit that spans code origins. During
362 // the current code origin it issues a MovHint, and at the start of the next
363 // code origin there will be a SetLocal. If the local needs flushing, the second
364 // SetLocal will be preceded with a Flush.
367 // A set where the SetLocal happens immediately and there is still a Flush. This
368 // is relevant when assigning to a local in tricky situations for the delayed
369 // SetLocal logic but where we know that we have not performed any side effects
370 // within this code origin. This is a safe replacement for NormalSet anytime we
371 // know that we have not yet performed side effects in this code origin.
372 ImmediateSetWithFlush,
374 // A set where the SetLocal happens immediately and we do not Flush it even if
375 // this is a local that is marked as needing it. This is relevant when
376 // initializing locals at the top of a function.
379 Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
381 addToGraph(MovHint, OpInfo(operand.offset()), value);
383 // We can't exit anymore because our OSR exit state has changed.
386 DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
388 if (setMode == NormalSet) {
389 m_setLocalQueue.append(delayed);
393 return delayed.execute(this, setMode);
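// Sketch of the two-phase protocol described above: for a NormalSet, setDirect() emits the
// MovHint for the current code origin right away and only queues a DelayedSetLocal. The queued
// SetLocal (preceded by a Flush if the local needs one) is emitted later by
// processSetLocalQueue(), once the parser has moved on to the next code origin.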
396 void processSetLocalQueue()
398 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
399 m_setLocalQueue[i].execute(this);
400 m_setLocalQueue.resize(0);
403 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
405 return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
408 Node* injectLazyOperandSpeculation(Node* node)
410 ASSERT(node->op() == GetLocal);
411 ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
412 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
413 LazyOperandValueProfileKey key(m_currentIndex, node->local());
414 SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
415 node->variableAccessData()->predict(prediction);
419 // Used in implementing get/set, above, where the operand is a local variable.
420 Node* getLocal(VirtualRegister operand)
422 unsigned local = operand.toLocal();
424 Node* node = m_currentBlock->variablesAtTail.local(local);
426 // This has two goals: 1) link together variable access datas, and 2)
427 // try to avoid creating redundant GetLocals. (1) is required for
428 // correctness - no other phase will ensure that block-local variable
429 // access data unification is done correctly. (2) is purely opportunistic
430 // and is meant as a compile-time optimization only.
432 VariableAccessData* variable;
435 variable = node->variableAccessData();
437 switch (node->op()) {
441 return node->child1().node();
446 variable = newVariableAccessData(operand);
448 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
449 m_currentBlock->variablesAtTail.local(local) = node;
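// Note: variablesAtTail is the per-block table of the last node to touch each operand. Reusing
// the node found there is what links VariableAccessData uses together (goal 1 above), and what
// lets us hand back an existing GetLocal, or a SetLocal's child, instead of emitting a
// redundant GetLocal (goal 2 above).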
452 Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
454 CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
455 m_currentSemanticOrigin = semanticOrigin;
457 unsigned local = operand.toLocal();
459 if (setMode != ImmediateNakedSet) {
460 ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
461 if (argumentPosition)
462 flushDirect(operand, argumentPosition);
463 else if (m_graph.needsScopeRegister() && operand == m_codeBlock->scopeRegister())
467 VariableAccessData* variableAccessData = newVariableAccessData(operand);
468 variableAccessData->mergeStructureCheckHoistingFailed(
469 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
470 variableAccessData->mergeCheckArrayHoistingFailed(
471 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
472 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
473 m_currentBlock->variablesAtTail.local(local) = node;
475 m_currentSemanticOrigin = oldSemanticOrigin;
479 // Used in implementing get/set, above, where the operand is an argument.
480 Node* getArgument(VirtualRegister operand)
482 unsigned argument = operand.toArgument();
483 ASSERT(argument < m_numArguments);
485 Node* node = m_currentBlock->variablesAtTail.argument(argument);
487 VariableAccessData* variable;
490 variable = node->variableAccessData();
492 switch (node->op()) {
496 return node->child1().node();
501 variable = newVariableAccessData(operand);
503 node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
504 m_currentBlock->variablesAtTail.argument(argument) = node;
507 Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
509 CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
510 m_currentSemanticOrigin = semanticOrigin;
512 unsigned argument = operand.toArgument();
513 ASSERT(argument < m_numArguments);
515 VariableAccessData* variableAccessData = newVariableAccessData(operand);
517 // Always flush arguments, except for 'this'. If 'this' is created by us,
518 // then make sure that it's never unboxed.
519 if (argument || m_graph.needsFlushedThis()) {
520 if (setMode != ImmediateNakedSet)
521 flushDirect(operand);
524 if (!argument && m_codeBlock->specializationKind() == CodeForConstruct)
525 variableAccessData->mergeShouldNeverUnbox(true);
527 variableAccessData->mergeStructureCheckHoistingFailed(
528 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
529 variableAccessData->mergeCheckArrayHoistingFailed(
530 m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
531 Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
532 m_currentBlock->variablesAtTail.argument(argument) = node;
534 m_currentSemanticOrigin = oldSemanticOrigin;
538 ArgumentPosition* findArgumentPositionForArgument(int argument)
540 InlineStackEntry* stack = m_inlineStackTop;
541 while (stack->m_inlineCallFrame)
542 stack = stack->m_caller;
543 return stack->m_argumentPositions[argument];
546 ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
548 for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
549 InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
550 if (!inlineCallFrame)
552 if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + CallFrame::headerSizeInRegisters))
554 if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
556 if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
558 int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
559 return stack->m_argumentPositions[argument];
564 ArgumentPosition* findArgumentPosition(VirtualRegister operand)
566 if (operand.isArgument())
567 return findArgumentPositionForArgument(operand.toArgument());
568 return findArgumentPositionForLocal(operand);
571 void flush(VirtualRegister operand)
573 flushDirect(m_inlineStackTop->remapOperand(operand));
576 void flushDirect(VirtualRegister operand)
578 flushDirect(operand, findArgumentPosition(operand));
581 void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
583 addFlushOrPhantomLocal<Flush>(operand, argumentPosition);
586 template<NodeType nodeType>
587 void addFlushOrPhantomLocal(VirtualRegister operand, ArgumentPosition* argumentPosition)
589 ASSERT(!operand.isConstant());
591 Node* node = m_currentBlock->variablesAtTail.operand(operand);
593 VariableAccessData* variable;
596 variable = node->variableAccessData();
598 variable = newVariableAccessData(operand);
600 node = addToGraph(nodeType, OpInfo(variable));
601 m_currentBlock->variablesAtTail.operand(operand) = node;
602 if (argumentPosition)
603 argumentPosition->addVariable(variable);
606 void phantomLocalDirect(VirtualRegister operand)
608 addFlushOrPhantomLocal<PhantomLocal>(operand, findArgumentPosition(operand));
611 void flush(InlineStackEntry* inlineStackEntry)
614 if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
615 ASSERT(!m_hasDebuggerEnabled);
616 numArguments = inlineCallFrame->arguments.size();
617 if (inlineCallFrame->isClosureCall)
618 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(CallFrameSlot::callee)));
619 if (inlineCallFrame->isVarargs())
620 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(CallFrameSlot::argumentCount)));
622 numArguments = inlineStackEntry->m_codeBlock->numParameters();
623 for (unsigned argument = numArguments; argument-- > 1;)
624 flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
625 if (!inlineStackEntry->m_inlineCallFrame && m_graph.needsFlushedThis())
626 flushDirect(virtualRegisterForArgument(0));
627 if (m_graph.needsScopeRegister())
628 flushDirect(m_codeBlock->scopeRegister());
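// Flushing the callee, argument count, arguments, and scope register keeps those values live
// in their stack slots for this frame, so that OSR exit (and anything else that inspects the
// call frame) can observe them. Note the assumption, asserted above, that we never inline when
// the debugger is enabled.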
631 void flushForTerminal()
633 CodeOrigin origin = currentCodeOrigin();
634 unsigned bytecodeIndex = origin.bytecodeIndex;
636 for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) {
637 flush(inlineStackEntry);
639 ASSERT(origin.inlineCallFrame == inlineStackEntry->m_inlineCallFrame);
640 InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame;
641 CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
642 FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
643 const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
645 for (unsigned local = codeBlock->m_numCalleeLocals; local--;) {
646 if (livenessAtBytecode[local]) {
647 VirtualRegister reg = virtualRegisterForLocal(local);
649 reg = inlineStackEntry->remapOperand(reg);
650 phantomLocalDirect(reg);
654 if (inlineCallFrame) {
655 bytecodeIndex = inlineCallFrame->directCaller.bytecodeIndex;
656 origin = inlineCallFrame->directCaller;
661 void flushForReturn()
663 flush(m_inlineStackTop);
666 void flushIfTerminal(SwitchData& data)
668 if (data.fallThrough.bytecodeIndex() > m_currentIndex)
671 for (unsigned i = data.cases.size(); i--;) {
672 if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
679 // Assumes that the constant should be strongly marked.
680 Node* jsConstant(JSValue constantValue)
682 return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
685 Node* weakJSConstant(JSValue constantValue)
687 return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
690 // Helper functions to get/set the this value.
693 return get(m_inlineStackTop->m_codeBlock->thisRegister());
696 void setThis(Node* value)
698 set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
701 InlineCallFrame* inlineCallFrame()
703 return m_inlineStackTop->m_inlineCallFrame;
706 bool allInlineFramesAreTailCalls()
708 return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingTailCalls();
711 CodeOrigin currentCodeOrigin()
713 return CodeOrigin(m_currentIndex, inlineCallFrame());
716 NodeOrigin currentNodeOrigin()
721 if (m_currentSemanticOrigin.isSet())
722 semantic = m_currentSemanticOrigin;
724 semantic = currentCodeOrigin();
726 forExit = currentCodeOrigin();
728 return NodeOrigin(semantic, forExit, m_exitOK);
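// A NodeOrigin thus carries two code origins plus an exit flag: 'semantic' is where the node
// should appear to execute (overridable via m_currentSemanticOrigin, e.g. for delayed
// SetLocals), 'forExit' is where an OSR exit from this node would resume, and m_exitOK records
// whether exiting is currently legal at all.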
731 BranchData* branchData(unsigned taken, unsigned notTaken)
733 // We assume that branches originating from bytecode always have a fall-through. We
734 // use this assumption to avoid checking for the creation of terminal blocks.
735 ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
736 BranchData* data = m_graph.m_branchData.add();
737 *data = BranchData::withBytecodeIndices(taken, notTaken);
741 Node* addToGraph(Node* node)
743 if (Options::verboseDFGByteCodeParsing())
744 dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n");
745 m_currentBlock->append(node);
746 if (clobbersExitState(m_graph, node))
751 Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
753 Node* result = m_graph.addNode(
754 op, currentNodeOrigin(), Edge(child1), Edge(child2),
756 return addToGraph(result);
758 Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
760 Node* result = m_graph.addNode(
761 op, currentNodeOrigin(), child1, child2, child3);
762 return addToGraph(result);
764 Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
766 Node* result = m_graph.addNode(
767 op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
769 return addToGraph(result);
771 Node* addToGraph(NodeType op, OpInfo info, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
773 Node* result = m_graph.addNode(op, currentNodeOrigin(), info, child1, child2, child3);
774 return addToGraph(result);
776 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
778 Node* result = m_graph.addNode(
779 op, currentNodeOrigin(), info1, info2,
780 Edge(child1), Edge(child2), Edge(child3));
781 return addToGraph(result);
783 Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
785 Node* result = m_graph.addNode(
786 op, currentNodeOrigin(), info1, info2, child1, child2, child3);
787 return addToGraph(result);
790 Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2 = OpInfo())
792 Node* result = m_graph.addNode(
793 Node::VarArg, op, currentNodeOrigin(), info1, info2,
794 m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
797 m_numPassedVarArgs = 0;
802 void addVarArgChild(Node* child)
804 m_graph.m_varArgChildren.append(Edge(child));
805 m_numPassedVarArgs++;
808 Node* addCallWithoutSettingResult(
809 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
812 addVarArgChild(callee);
813 size_t parameterSlots = Graph::parameterSlotsForArgCount(argCount);
815 if (parameterSlots > m_parameterSlots)
816 m_parameterSlots = parameterSlots;
818 for (int i = 0; i < argCount; ++i)
819 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
821 return addToGraph(Node::VarArg, op, opInfo, prediction);
825 int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset,
826 SpeculatedType prediction)
828 if (op == TailCall) {
829 if (allInlineFramesAreTailCalls())
830 return addCallWithoutSettingResult(op, OpInfo(signature), callee, argCount, registerOffset, OpInfo());
831 op = TailCallInlinedCaller;
835 Node* call = addCallWithoutSettingResult(
836 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction));
837 VirtualRegister resultReg(result);
838 if (resultReg.isValid())
839 set(resultReg, call);
843 Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
845 // FIXME: This should route to emitPropertyCheck, not the other way around. But currently,
846 // this gets no profit from using emitPropertyCheck() since we'll non-adaptively watch the
847 // object's structure as soon as we make it a weakJSConstant.
848 Node* objectNode = weakJSConstant(object);
849 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
853 SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
855 SpeculatedType prediction;
857 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
858 prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
861 if (prediction != SpecNone)
864 // If we have no information about the values this
865 // node generates, we check if by any chance it is
866 // a tail call opcode. In that case, we walk up the
867 // inline frames to find a call higher in the call
868 // chain and use its prediction. If we only have
869 // inlined tail call frames, we use SpecFullTop
870 // to avoid a spurious OSR exit.
871 Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex;
872 OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode);
876 case op_tail_call_varargs:
877 case op_tail_call_forward_arguments: {
878 // We should be more permissive about returning BOTTOM instead of TOP here.
879 // Currently, this will cause us to Force OSR exit. This is bad because returning
880 // TOP will cause anything that transitively touches this speculated type to
881 // also become TOP during prediction propagation.
882 // https://bugs.webkit.org/show_bug.cgi?id=164337
883 if (!inlineCallFrame())
886 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls();
890 InlineStackEntry* stack = m_inlineStackTop;
891 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
892 stack = stack->m_caller;
894 bytecodeIndex = codeOrigin->bytecodeIndex;
895 CodeBlock* profiledBlock = stack->m_profiledBlock;
896 ConcurrentJSLocker locker(profiledBlock->m_lock);
897 return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
904 RELEASE_ASSERT_NOT_REACHED();
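// Summary of the fallback above: if the value profile is empty and the current opcode is a
// tail call, we borrow the prediction from the nearest caller that is not itself a tail call.
// If every inline frame is a tail call, so no such caller exists, we return SpecFullTop rather
// than SpecNone, to avoid planting a spurious ForceOSRExit in getPrediction() below.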
908 SpeculatedType getPrediction(unsigned bytecodeIndex)
910 SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
912 if (prediction == SpecNone) {
913 // We have no information about what values this node generates. Give up
914 // on executing this code, since we're likely to do more damage than good.
915 addToGraph(ForceOSRExit);
921 SpeculatedType getPredictionWithoutOSRExit()
923 return getPredictionWithoutOSRExit(m_currentIndex);
926 SpeculatedType getPrediction()
928 return getPrediction(m_currentIndex);
931 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
933 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
934 profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
935 bool makeSafe = profile->outOfBounds(locker);
936 return ArrayMode::fromObserved(locker, profile, action, makeSafe);
939 ArrayMode getArrayMode(ArrayProfile* profile)
941 return getArrayMode(profile, Array::Read);
944 Node* makeSafe(Node* node)
946 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
947 node->mergeFlags(NodeMayOverflowInt32InDFG);
948 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
949 node->mergeFlags(NodeMayNegZeroInDFG);
951 if (!isX86() && node->op() == ArithMod)
955 ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
957 switch (node->op()) {
961 if (arithProfile->didObserveDouble())
962 node->mergeFlags(NodeMayHaveDoubleResult);
963 if (arithProfile->didObserveNonNumber())
964 node->mergeFlags(NodeMayHaveNonNumberResult);
968 if (arithProfile->didObserveInt52Overflow())
969 node->mergeFlags(NodeMayOverflowInt52);
970 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
971 node->mergeFlags(NodeMayOverflowInt32InBaseline);
972 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
973 node->mergeFlags(NodeMayNegZeroInBaseline);
974 if (arithProfile->didObserveDouble())
975 node->mergeFlags(NodeMayHaveDoubleResult);
976 if (arithProfile->didObserveNonNumber())
977 node->mergeFlags(NodeMayHaveNonNumberResult);
981 ASSERT_WITH_MESSAGE(!arithProfile->didObserveNonNumber(), "op_negate starts with a toNumber() on the argument, so it should only produce numbers.");
983 if (arithProfile->lhsObservedType().sawNumber() || arithProfile->didObserveDouble())
984 node->mergeFlags(NodeMayHaveDoubleResult);
985 if (arithProfile->didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
986 node->mergeFlags(NodeMayNegZeroInBaseline);
987 if (arithProfile->didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
988 node->mergeFlags(NodeMayOverflowInt32InBaseline);
998 if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) {
999 switch (node->op()) {
1000 case UInt32ToNumber:
1004 case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
1005 node->mergeFlags(NodeMayOverflowInt32InBaseline);
1016 Node* makeDivSafe(Node* node)
1018 ASSERT(node->op() == ArithDiv);
1020 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1021 node->mergeFlags(NodeMayOverflowInt32InDFG);
1022 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
1023 node->mergeFlags(NodeMayNegZeroInDFG);
1025 // The main slow case counter for op_div in the old JIT counts only when
1026 // the operands are not numbers. We don't care about that since we already
1027 // have speculations in place that take care of that separately. We only
1028 // care about when the outcome of the division is not an integer, which
1029 // is what the special fast case counter tells us.
1031 if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
1034 // FIXME: It might be possible to make this more granular.
1035 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);
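// Both makeSafe() and makeDivSafe() only merge conservative "may overflow / may produce -0 /
// may produce double" flags gathered from baseline profiling and prior exit sites; they do not
// change the node's semantics here. The flags are hints that later phases consult when deciding
// how aggressively to speculate on this arithmetic.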
1040 void noticeArgumentsUse()
1042 // All of the arguments in this function need to be formatted as JSValues because we will
1043 // load from them in a random-access fashion and we don't want to have to switch on
1046 for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
1047 argument->mergeShouldNeverUnbox(true);
1050 bool needsDynamicLookup(ResolveType, OpcodeID);
1053 CodeBlock* m_codeBlock;
1054 CodeBlock* m_profiledBlock;
1057 // The current block being generated.
1058 BasicBlock* m_currentBlock;
1059 // The bytecode index of the current instruction being generated.
1060 unsigned m_currentIndex;
1061 // The semantic origin of the current node if different from the current Index.
1062 CodeOrigin m_currentSemanticOrigin;
1063 // True if it's OK to OSR exit right now.
1064 bool m_exitOK { false };
1066 FrozenValue* m_constantUndefined;
1067 FrozenValue* m_constantNull;
1068 FrozenValue* m_constantNaN;
1069 FrozenValue* m_constantOne;
1070 Vector<Node*, 16> m_constants;
1072 // The number of arguments passed to the function.
1073 unsigned m_numArguments;
1074 // The number of locals (vars + temporaries) used in the function.
1075 unsigned m_numLocals;
1076 // The number of slots (in units of sizeof(Register)) that we need to
1077 // preallocate for arguments to outgoing calls from this frame. This
1078 // number includes the CallFrame slots that we initialize for the callee
1079 // (but not the callee-initialized CallerFrame and ReturnPC slots).
1080 // This number is 0 if and only if this function is a leaf.
1081 unsigned m_parameterSlots;
1082 // The number of var args passed to the next var arg node.
1083 unsigned m_numPassedVarArgs;
1085 HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
1087 struct InlineStackEntry {
1088 ByteCodeParser* m_byteCodeParser;
1090 CodeBlock* m_codeBlock;
1091 CodeBlock* m_profiledBlock;
1092 InlineCallFrame* m_inlineCallFrame;
1094 ScriptExecutable* executable() { return m_codeBlock->ownerScriptExecutable(); }
1096 QueryableExitProfile m_exitProfile;
1098 // Remapping of identifier and constant numbers from the code block being
1099 // inlined (inline callee) to the code block that we're inlining into
1100 // (the machine code block, which is the transitive, though not necessarily
1102 Vector<unsigned> m_identifierRemap;
1103 Vector<unsigned> m_constantBufferRemap;
1104 Vector<unsigned> m_switchRemap;
1106 // Blocks introduced by this code block, which need successor linking.
1107 // May include up to one basic block that includes the continuation after
1108 // the callsite in the caller. These must be appended in the order that they
1109 // are created, but their bytecodeBegin values need not be in order as they
1111 Vector<UnlinkedBlock> m_unlinkedBlocks;
1113 // Potential block linking targets. Must be sorted by bytecodeBegin, and
1114 // cannot have two blocks that have the same bytecodeBegin.
1115 Vector<BasicBlock*> m_blockLinkingTargets;
1117 // If the callsite's basic block was split into two, then this will be
1118 // the head of the callsite block. It needs its successors linked to the
1119 // m_unlinkedBlocks, but not the other way around: there's no way for
1120 // any blocks in m_unlinkedBlocks to jump back into this block.
1121 BasicBlock* m_callsiteBlockHead;
1123 // Does the callsite block head need linking? This is typically true
1124 // but will be false for the machine code block's inline stack entry
1125 // (since that one is not inlined) and for cases where an inline callee
1126 // did the linking for us.
1127 bool m_callsiteBlockHeadNeedsLinking;
1129 VirtualRegister m_returnValue;
1131 // Speculations about variable types collected from the profiled code block,
1132 // which are based on OSR exit profiles that past DFG compilations of this
1133 // code block had gathered.
1134 LazyOperandValueProfileParser m_lazyOperands;
1136 CallLinkInfoMap m_callLinkInfos;
1137 StubInfoMap m_stubInfos;
1138 ByValInfoMap m_byValInfos;
1140 // Did we see any returns? We need to handle the (uncommon but necessary)
1141 // case where a procedure that does not return was inlined.
1144 // Did we have any early returns?
1145 bool m_didEarlyReturn;
1147 // Pointers to the argument position trackers for this slice of code.
1148 Vector<ArgumentPosition*> m_argumentPositions;
1150 InlineStackEntry* m_caller;
1155 CodeBlock* profiledBlock,
1156 BasicBlock* callsiteBlockHead,
1157 JSFunction* callee, // Null if this is a closure call.
1158 VirtualRegister returnValueVR,
1159 VirtualRegister inlineCallFrameStart,
1160 int argumentCountIncludingThis,
1161 InlineCallFrame::Kind);
1165 m_byteCodeParser->m_inlineStackTop = m_caller;
1168 VirtualRegister remapOperand(VirtualRegister operand) const
1170 if (!m_inlineCallFrame)
1173 ASSERT(!operand.isConstant());
1175 return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
1179 InlineStackEntry* m_inlineStackTop;
1181 struct DelayedSetLocal {
1182 CodeOrigin m_origin;
1183 VirtualRegister m_operand;
1186 DelayedSetLocal() { }
1187 DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
1189 , m_operand(operand)
1194 Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
1196 if (m_operand.isArgument())
1197 return parser->setArgument(m_origin, m_operand, m_value, setMode);
1198 return parser->setLocal(m_origin, m_operand, m_value, setMode);
1202 Vector<DelayedSetLocal, 2> m_setLocalQueue;
1204 CodeBlock* m_dfgCodeBlock;
1205 CallLinkStatus::ContextMap m_callContextMap;
1206 StubInfoMap m_dfgStubInfos;
1208 Instruction* m_currentInstruction;
1209 bool m_hasDebuggerEnabled;
1213 // if (true) { ...; goto label; } else label: continue
1214 // Allows using NEXT_OPCODE as a statement, even in unbraced if+else, while containing a `continue`.
1215 // The more common idiom:
1216 // do { ...; } while (false)
1217 // Doesn't allow using `continue`.
1218 #define NEXT_OPCODE(name) \
1220 m_currentIndex += OPCODE_LENGTH(name); \
1221 goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
1223 WTF_CONCAT(NEXT_OPCODE_, __LINE__): \
1226 // Chain expressions with comma-operator so LAST_OPCODE can be used as a statement.
1227 #define LAST_OPCODE(name) \
1229 m_currentIndex += OPCODE_LENGTH(name), \
1231 shouldContinueParsing
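// Illustrative use (a hypothetical sketch, not a verbatim excerpt of parseBlock): inside
// parseBlock()'s opcode switch, a non-terminal opcode ends its case with something like
//     case op_mov: {
//         Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
//         set(VirtualRegister(currentInstruction[1].u.operand), op);
//         NEXT_OPCODE(op_mov);
//     }
// which advances m_currentIndex and continues the dispatch loop, while a terminal opcode ends
// its case with LAST_OPCODE(name), which advances the index and evaluates to
// shouldContinueParsing.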
1233 ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode)
1235 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1236 ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call));
1238 pc[1].u.operand, op, callMode, OPCODE_LENGTH(op_call),
1239 pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
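// Operand layout assumed above (shared by op_call, op_construct, and op_tail_call): pc[1] is
// the result register, pc[2] the callee, pc[3] the argument count including 'this', and pc[4]
// the frame offset, which is negated to form the register offset.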
1242 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1243 int result, NodeType op, CallMode callMode, unsigned instructionSize,
1244 int callee, int argumentCountIncludingThis, int registerOffset)
1246 Node* callTarget = get(VirtualRegister(callee));
1248 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1249 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1250 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1253 result, op, callMode, instructionSize, callTarget,
1254 argumentCountIncludingThis, registerOffset, callLinkStatus);
1257 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1258 int result, NodeType op, CallMode callMode, unsigned instructionSize,
1259 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1260 CallLinkStatus callLinkStatus)
1263 result, op, InlineCallFrame::kindFor(callMode), instructionSize, callTarget, argumentCountIncludingThis,
1264 registerOffset, callLinkStatus, getPrediction());
1267 void ByteCodeParser::refineStatically(CallLinkStatus& callLinkStatus, Node* callTarget)
1269 if (callTarget->isCellConstant()) {
1270 callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1275 ByteCodeParser::Terminality ByteCodeParser::handleCall(
1276 int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1277 Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1278 CallLinkStatus callLinkStatus, SpeculatedType prediction)
1280 ASSERT(registerOffset <= 0);
1282 refineStatically(callLinkStatus, callTarget);
1284 if (Options::verboseDFGByteCodeParsing())
1285 dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1287 if (!callLinkStatus.canOptimize()) {
1288 // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1289 // that we cannot optimize them.
1291 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1292 if (callNode->op() == TailCall)
1294 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1298 unsigned nextOffset = m_currentIndex + instructionSize;
1300 if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1301 if (m_graph.compilation())
1302 m_graph.compilation()->noticeInlinedCall();
1306 Node* callNode = addCall(result, op, nullptr, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1307 if (callNode->op() == TailCall)
1309 ASSERT(callNode->op() != TailCallVarargs && callNode->op() != TailCallForwardVarargs);
1313 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode)
1315 ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
1316 ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs));
1318 int result = pc[1].u.operand;
1319 int callee = pc[2].u.operand;
1320 int thisReg = pc[3].u.operand;
1321 int arguments = pc[4].u.operand;
1322 int firstFreeReg = pc[5].u.operand;
1323 int firstVarArgOffset = pc[6].u.operand;
1325 SpeculatedType prediction = getPrediction();
1327 Node* callTarget = get(VirtualRegister(callee));
1329 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1330 m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1331 m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1332 refineStatically(callLinkStatus, callTarget);
1334 if (Options::verboseDFGByteCodeParsing())
1335 dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1337 if (callLinkStatus.canOptimize()
1338 && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(callMode), prediction)) {
1339 if (m_graph.compilation())
1340 m_graph.compilation()->noticeInlinedCall();
1344 CallVarargsData* data = m_graph.m_callVarargsData.add();
1345 data->firstVarArgOffset = firstVarArgOffset;
1347 Node* thisChild = get(VirtualRegister(thisReg));
1348 Node* argumentsChild = nullptr;
1349 if (op != TailCallForwardVarargs)
1350 argumentsChild = get(VirtualRegister(arguments));
1352 if (op == TailCallVarargs || op == TailCallForwardVarargs) {
1353 if (allInlineFramesAreTailCalls()) {
1354 addToGraph(op, OpInfo(data), OpInfo(), callTarget, thisChild, argumentsChild);
1357 op = op == TailCallVarargs ? TailCallVarargsInlinedCaller : TailCallForwardVarargsInlinedCaller;
1360 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild);
1361 VirtualRegister resultReg(result);
1362 if (resultReg.isValid())
1363 set(resultReg, call);
1367 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1370 if (thisArgumentReg.isValid())
1371 thisArgument = get(thisArgumentReg);
1373 thisArgument = nullptr;
1376 Node* callTargetForCheck;
1377 if (callee.isClosureCall()) {
1378 calleeCell = callee.executable();
1379 callTargetForCheck = addToGraph(GetExecutable, callTarget);
1381 calleeCell = callee.nonExecutableCallee();
1382 callTargetForCheck = callTarget;
1387 addToGraph(Phantom, thisArgument);
1388 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck);
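// So the check emitted here is: for a closure call, load the callee's executable with
// GetExecutable and CheckCell it against the known FunctionExecutable; otherwise, CheckCell the
// callee itself against the known callee cell. The Phantom keeps the 'this' argument observable
// across the check.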
1391 Node* ByteCodeParser::getArgumentCount()
1393 Node* argumentCount;
1394 if (m_inlineStackTop->m_inlineCallFrame) {
1395 if (m_inlineStackTop->m_inlineCallFrame->isVarargs())
1396 argumentCount = get(VirtualRegister(CallFrameSlot::argumentCount));
1398 argumentCount = jsConstant(m_graph.freeze(jsNumber(m_inlineStackTop->m_inlineCallFrame->arguments.size()))->value());
1400 argumentCount = addToGraph(GetArgumentCountIncludingThis, OpInfo(0), OpInfo(SpecInt32Only));
1401 return argumentCount;
1404 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1406 for (int i = 0; i < argumentCountIncludingThis; ++i)
1407 addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1410 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CallMode callMode)
1412 CodeSpecializationKind kind = specializationKindFor(callMode);
1414 dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1416 if (m_hasDebuggerEnabled) {
1418 dataLog(" Failing because the debugger is in use.\n");
1422 FunctionExecutable* executable = callee.functionExecutable();
1425 dataLog(" Failing because there is no function executable.\n");
1429 // Do we have a code block, and does the code block's size match the heuristics/requirements for
1430 // being an inline candidate? We might not have a code block (1) if code was thrown away,
1431 // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
1432 // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
1433 // to inline it if we had a static proof of what was being called; this might happen for example
1434 // if you call a global function, where watchpointing gives us static information. Overall,
1435 // it's a rare case because we expect that any hot callees would have already been compiled.
1436 CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1439 dataLog(" Failing because no code block available.\n");
1443 // Does the number of arguments we're passing match the arity of the target? We currently
1444 // inline only if the number of arguments passed is greater than or equal to the number of
1445 // arguments expected.
1446 if (codeBlock->numParameters() > argumentCountIncludingThis) {
1448 dataLog(" Failing because of arity mismatch.\n");
1452 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1453 codeBlock, kind, callee.isClosureCall());
1455 dataLog(" Call mode: ", callMode, "\n");
1456 dataLog(" Is closure call: ", callee.isClosureCall(), "\n");
1457 dataLog(" Capability level: ", capabilityLevel, "\n");
1458 dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
1459 dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
1460 dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1461 dataLog(" Is inlining candidate: ", codeBlock->ownerScriptExecutable()->isInliningCandidate(), "\n");
1463 if (!canInline(capabilityLevel)) {
1465 dataLog(" Failing because the function is not inlineable.\n");
1469 // Check if the caller is already too large. We do this check here because that's just
1470 // where we happen to also have the callee's code block, and we want that for the
1471 // purpose of unsetting SABI (ShouldAlwaysBeInlined).
1472 if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1473 codeBlock->m_shouldAlwaysBeInlined = false;
1475 dataLog(" Failing because the caller is too large.\n");
1479 // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1481 // https://bugs.webkit.org/show_bug.cgi?id=127627
1483 // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1484 // functions have very low fidelity profiling, and presumably they weren't very hot if they
1485 // haven't gotten to Baseline yet. Consider not inlining these functions.
1486 // https://bugs.webkit.org/show_bug.cgi?id=145503
1488 // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1489 // too many levels? If either of these are detected, then don't inline. We adjust our
1490 // heuristics if we are dealing with a function that cannot otherwise be compiled.
1493 unsigned recursion = 0;
1495 for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1497 if (depth >= Options::maximumInliningDepth()) {
1499 dataLog(" Failing because depth exceeded.\n");
1503 if (entry->executable() == executable) {
1505 if (recursion >= Options::maximumInliningRecursion()) {
1507 dataLog(" Failing because recursion detected.\n");
1514 dataLog(" Inlining should be possible.\n");
1516 // It might be possible to inline.
1517 return codeBlock->instructionCount();
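// The returned instruction count is what the inlining machinery weighs against its remaining
// inlining balance; UINT_MAX, returned above on any failure, means the callee is never an
// inlining candidate (see the declaration comment for inliningCost()).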
1520 template<typename ChecksFunctor>
1521 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
1523 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1525 ASSERT(inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind)) != UINT_MAX);
1527 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1528 insertChecks(codeBlock);
1530 // FIXME: Don't flush constants!
1532 int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + CallFrame::headerSizeInRegisters;
1535 VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1536 CallFrame::headerSizeInRegisters + codeBlock->m_numCalleeLocals);
1538 size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1540 VirtualRegister resultReg(resultOperand);
1541 if (resultReg.isValid())
1542 resultReg = m_inlineStackTop->remapOperand(resultReg);
1544 VariableAccessData* calleeVariable = nullptr;
1545 if (callee.isClosureCall()) {
1546 Node* calleeSet = set(
1547 VirtualRegister(registerOffset + CallFrameSlot::callee), callTargetNode, ImmediateNakedSet);
1549 calleeVariable = calleeSet->variableAccessData();
1550 calleeVariable->mergeShouldNeverUnbox(true);
1553 InlineStackEntry inlineStackEntry(
1554 this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1555 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1557 // This is where the actual inlining really happens.
1558 unsigned oldIndex = m_currentIndex;
1561 // At this point, it's again OK to OSR exit.
1564 InlineVariableData inlineVariableData;
1565 inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1566 inlineVariableData.argumentPositionStart = argumentPositionStart;
1567 inlineVariableData.calleeVariable = 0;
1570 m_inlineStackTop->m_inlineCallFrame->isClosureCall
1571 == callee.isClosureCall());
1572 if (callee.isClosureCall()) {
1573 RELEASE_ASSERT(calleeVariable);
1574 inlineVariableData.calleeVariable = calleeVariable;
1577 m_graph.m_inlineVariableData.append(inlineVariableData);
1580 clearCaches(); // Reset our state now that we're back to the outer code.
1582 m_currentIndex = oldIndex;
1585 // If the inlined code created some new basic blocks, then we have linking to do.
1586 if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1588 ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1589 if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1590 linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1592 ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1594 if (callerLinkability == CallerDoesNormalLinking)
1595 cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1597 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1599 ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1601 BasicBlock* lastBlock = m_graph.lastBlock();
1602 // If there was a return, but no early returns, then we're done. We allow parsing of
1603 // the caller to continue in whatever basic block we're in right now.
1604 if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1605 if (Options::verboseDFGByteCodeParsing())
1606 dataLog(" Allowing parsing to continue in last inlined block.\n");
1608 ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
1610 // If we created new blocks then the last block needs linking, but in the
1611 // caller. It doesn't need to be linked to, but it needs outgoing links.
1612 if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1613 // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1614 // for release builds because this block will never serve as a potential target
1615 // in the linker's binary search.
1616 if (Options::verboseDFGByteCodeParsing())
1617 dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
1618 lastBlock->bytecodeBegin = m_currentIndex;
1619 if (callerLinkability == CallerDoesNormalLinking) {
1621 dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1622 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1626 m_currentBlock = m_graph.lastBlock();
1630 if (Options::verboseDFGByteCodeParsing())
1631 dataLog(" Creating new block after inlining.\n");
1633 // If we get to this point then all blocks must end in some sort of terminal.
1634 ASSERT(lastBlock->terminal());
1636 // Need to create a new basic block for the continuation at the caller.
1637 Ref<BasicBlock> block = adoptRef(*new BasicBlock(nextOffset, m_numArguments, m_numLocals, 1));
1639 // Link the early returns to the basic block we're about to create.
1640 for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1641 if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1643 BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1644 ASSERT(!blockToLink->isLinked);
1645 Node* node = blockToLink->terminal();
1646 ASSERT(node->op() == Jump);
1647 ASSERT(!node->targetBlock());
1648 node->targetBlock() = block.ptr();
1649 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1651 dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1652 blockToLink->didLink();
1655 m_currentBlock = block.ptr();
1656 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1658 dataLog("Adding unlinked block ", RawPointer(block.ptr()), " (many returns)\n");
1659 if (callerLinkability == CallerDoesNormalLinking) {
1660 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.ptr()));
1661 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.ptr());
1663 m_graph.appendBlock(WTFMove(block));
1664 prepareToParseBlock();
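// cancelLinkingForBlock() is used when an inlinee takes over linking duties for a block the
// caller would otherwise link: it either marks the caller's last unlinked block as no longer
// needing normal linking, or clears the caller's "callsite block head needs linking" flag.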
1667 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1669 // It's possible that the callsite block head is not owned by the caller.
1670 if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1671 // It's definitely owned by the caller, because the caller created new blocks.
1672 // Assert that this all adds up.
1673 ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1674 ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1675 inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1677 // It's definitely not owned by the caller. Tell the caller that he does not
1678 // need to link his callsite block head, because we did it for him.
1679 ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1680 ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1681 inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1685 template<typename ChecksFunctor>
1686 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
1688 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1690 if (!inliningBalance)
1694 dataLog(" Considering callee ", callee, "\n");
1696 // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1697 // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1698 // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1699 // and there are no callsite value profiles, and native functions won't have callee value profiles for
1700 // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1701 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1702 // calling LoadVarargs twice.
1703 if (!InlineCallFrame::isVarargs(kind)) {
1705 bool didInsertChecks = false;
1706 auto insertChecksWithAccounting = [&] () {
1707 insertChecks(nullptr);
1708 didInsertChecks = true;
1711 if (InternalFunction* function = callee.internalFunction()) {
1712 if (handleConstantInternalFunction(callTargetNode, resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {
1713 RELEASE_ASSERT(didInsertChecks);
1714 addToGraph(Phantom, callTargetNode);
1715 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1719 RELEASE_ASSERT(!didInsertChecks);
1723 Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1724 if (intrinsic != NoIntrinsic) {
1725 if (handleIntrinsicCall(callTargetNode, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1726 RELEASE_ASSERT(didInsertChecks);
1727 addToGraph(Phantom, callTargetNode);
1728 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1733 RELEASE_ASSERT(!didInsertChecks);
1734 // We might still try to inline the Intrinsic because it might be a builtin JS function.
1737 if (Options::useDOMJIT()) {
1738 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) {
1739 if (handleDOMJITCall(callTargetNode, resultOperand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1740 RELEASE_ASSERT(didInsertChecks);
1741 addToGraph(Phantom, callTargetNode);
1742 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1746 RELEASE_ASSERT(!didInsertChecks);
1751 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind));
1752 if (myInliningCost > inliningBalance)
1755 Instruction* savedCurrentInstruction = m_currentInstruction;
1756 inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
1757 inliningBalance -= myInliningCost;
1758 m_currentInstruction = savedCurrentInstruction;
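// handleInlining() drives the inlining decision for a call site. If profiling reports a single
// known callee it attempts a straight-line inline (setting up LoadVarargs/ForwardVarargs for
// varargs calls); otherwise, in the FTL, it may emit a Switch over the callee (or the callee's
// executable) with one inlined body per case plus a slow path, all merging into a shared
// continuation block.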
1762 bool ByteCodeParser::handleInlining(
1763 Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
1764 int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
1765 VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
1766 unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1769 dataLog("Handling inlining...\n");
1770 dataLog("Stack: ", currentCodeOrigin(), "\n");
1772 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1774 if (!callLinkStatus.size()) {
1776 dataLog("Bailing inlining.\n");
1780 if (InlineCallFrame::isVarargs(kind)
1781 && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1783 dataLog("Bailing inlining because of varargs.\n");
1787 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1788 if (specializationKind == CodeForConstruct)
1789 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1790 if (callLinkStatus.isClosureCall())
1791 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1793 // First check if we can avoid creating control flow. Our inliner does some CFG
1794 // simplification on the fly and this helps reduce compile times, but we can only leverage
1795 // this in cases where we don't need control flow diamonds to check the callee.
1796 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1799 // Only used for varargs calls.
1800 unsigned mandatoryMinimum = 0;
1801 unsigned maxNumArguments = 0;
1803 if (InlineCallFrame::isVarargs(kind)) {
1804 if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
1805 mandatoryMinimum = functionExecutable->parameterCount();
1807 mandatoryMinimum = 0;
1810 maxNumArguments = std::max(
1811 callLinkStatus.maxNumArguments(),
1812 mandatoryMinimum + 1);
1814 // We sort of pretend that this *is* the number of arguments that were passed.
1815 argumentCountIncludingThis = maxNumArguments;
1817 registerOffset = registerOffsetOrFirstFreeReg + 1;
1818 registerOffset -= maxNumArguments; // includes "this"
1819 registerOffset -= CallFrame::headerSizeInRegisters;
1820 registerOffset = -WTF::roundUpToMultipleOf(
1821 stackAlignmentRegisters(),
1824 registerOffset = registerOffsetOrFirstFreeReg;
1826 bool result = attemptToInlineCall(
1827 callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1828 argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1829 inliningBalance, [&] (CodeBlock* codeBlock) {
1830 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1832 // If we have a varargs call, we want to extract the arguments right now.
1833 if (InlineCallFrame::isVarargs(kind)) {
1834 int remappedRegisterOffset =
1835 m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1837 ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1839 int argumentStart = registerOffset + CallFrame::headerSizeInRegisters;
1840 int remappedArgumentStart =
1841 m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1843 LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1844 data->start = VirtualRegister(remappedArgumentStart + 1);
1845 data->count = VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount);
1846 data->offset = argumentsOffset;
1847 data->limit = maxNumArguments;
1848 data->mandatoryMinimum = mandatoryMinimum;
1850 if (callOp == TailCallForwardVarargs)
1851 addToGraph(ForwardVarargs, OpInfo(data));
1853 addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1855 // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1856 // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1857 // callTargetNode because the other 2 are still in use and alive at this point.
1858 addToGraph(Phantom, callTargetNode);
1860 // In DFG IR before SSA, we cannot insert control flow between the LoadVarargs
1861 // and the last SetArgument. This isn't a problem once we get to DFG
1862 // SSA. Fortunately, we also have other reasons for not inserting control flow
1865 VariableAccessData* countVariable = newVariableAccessData(
1866 VirtualRegister(remappedRegisterOffset + CallFrameSlot::argumentCount));
1867 // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1868 // matter very much, since our use of a SetArgument and Flushes for this local slot is
1869 // mostly just a formality.
1870 countVariable->predict(SpecInt32Only);
1871 countVariable->mergeIsProfitableToUnbox(true);
1872 Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1873 m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1875 set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1876 for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1877 VariableAccessData* variable = newVariableAccessData(
1878 VirtualRegister(remappedArgumentStart + argument));
1879 variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1881 // For a while it had been my intention to do things like this inside the
1882 // prediction injection phase. But in this case it's really best to do it here,
1883 // because it's here that we have access to the variable access datas for the
1884 // inlining we're about to do.
1886 // Something else that's interesting here is that we'd really love to get
1887 // predictions from the arguments loaded at the callsite, rather than the
1888 // arguments received inside the callee. But that probably won't matter for most calls.
1890 if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1891 ConcurrentJSLocker locker(codeBlock->m_lock);
1892 if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
1893 variable->predict(profile->computeUpdatedPrediction(locker));
1896 Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1897 m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1902 dataLog("Done inlining (simple).\n");
1903 dataLog("Stack: ", currentCodeOrigin(), "\n");
1904 dataLog("Result: ", result, "\n");
1909 // We need to create some kind of switch over callee. For now we only do this if we believe that
1910 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1911 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1912 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1913 // we could improve that aspect of this by doing polymorphic inlining but having the profiling
1915 if (!isFTL(m_graph.m_plan.mode) || !Options::usePolymorphicCallInlining()
1916 || InlineCallFrame::isVarargs(kind)) {
1918 dataLog("Bailing inlining (hard).\n");
1919 dataLog("Stack: ", currentCodeOrigin(), "\n");
1924 // If the claim is that this did not originate from a stub, then we don't want to emit a switch
1925 // statement. Whenever the non-stub profiling says that it could take slow path, it really means that
1927 if (!Options::usePolymorphicCallInliningForNonStubStatus()
1928 && !callLinkStatus.isBasedOnStub()) {
1930 dataLog("Bailing inlining (non-stub polymorphism).\n");
1931 dataLog("Stack: ", currentCodeOrigin(), "\n");
1936 unsigned oldOffset = m_currentIndex;
1938 bool allAreClosureCalls = true;
1939 bool allAreDirectCalls = true;
1940 for (unsigned i = callLinkStatus.size(); i--;) {
1941 if (callLinkStatus[i].isClosureCall())
1942 allAreDirectCalls = false;
1944 allAreClosureCalls = false;
1947 Node* thingToSwitchOn;
1948 if (allAreDirectCalls)
1949 thingToSwitchOn = callTargetNode;
1950 else if (allAreClosureCalls)
1951 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1953 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1954 // where it would be beneficial. It might be best to handle these cases as if all calls were
1956 // https://bugs.webkit.org/show_bug.cgi?id=136020
1958 dataLog("Bailing inlining (mix).\n");
1959 dataLog("Stack: ", currentCodeOrigin(), "\n");
1965 dataLog("Doing hard inlining...\n");
1966 dataLog("Stack: ", currentCodeOrigin(), "\n");
1969 int registerOffset = registerOffsetOrFirstFreeReg;
1971 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1972 // store the callee so that it will be accessible to all of the blocks we're about to create. We
1973 // get away with doing an immediate-set here because we wouldn't have performed any side effects
1976 dataLog("Register offset: ", registerOffset);
1977 VirtualRegister calleeReg(registerOffset + CallFrameSlot::callee);
1978 calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1980 dataLog("Callee is going to be ", calleeReg, "\n");
1981 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1983 // It's OK to exit right now, even though we set some locals. That's because those locals are not
1988 SwitchData& data = *m_graph.m_switchData.add();
1989 data.kind = SwitchCell;
1990 addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1992 BasicBlock* originBlock = m_currentBlock;
1994 dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1995 originBlock->didLink();
1996 cancelLinkingForBlock(m_inlineStackTop, originBlock);
1998 // Each inlined callee will have a landing block that it returns at. They should all have jumps
1999 // to the continuation block, which we create last.
2000 Vector<BasicBlock*> landingBlocks;
2002 // We may force this true if we give up on inlining any of the edges.
2003 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
2006 dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
2008 for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
2009 m_currentIndex = oldOffset;
2010 Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
2011 m_currentBlock = block.ptr();
2012 m_graph.appendBlock(block.copyRef());
2013 prepareToParseBlock();
2015 Node* myCallTargetNode = getDirect(calleeReg);
2017 bool inliningResult = attemptToInlineCall(
2018 myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
2019 argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
2020 inliningBalance, [&] (CodeBlock*) { });
2022 if (!inliningResult) {
2023 // That failed so we let the block die. Nothing interesting should have been added to
2024 // the block. We also give up on inlining any of the (less frequent) callees.
2025 ASSERT(m_currentBlock == block.ptr());
2026 ASSERT(m_graph.m_blocks.last() == block.ptr());
2027 m_graph.killBlockAndItsContents(block.ptr());
2028 m_graph.m_blocks.removeLast();
2030 // The fact that inlining failed means we need a slow path.
2031 couldTakeSlowPath = true;
2035 JSCell* thingToCaseOn;
2036 if (allAreDirectCalls)
2037 thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
2039 ASSERT(allAreClosureCalls);
2040 thingToCaseOn = callLinkStatus[i].executable();
2042 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.ptr()));
2043 m_currentIndex = nextOffset;
2045 processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
2046 if (Node* terminal = m_currentBlock->terminal())
2047 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2050 landingBlocks.append(m_currentBlock);
2053 dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
2054 m_currentBlock->didLink();
2057 dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
2060 Ref<BasicBlock> slowPathBlock = adoptRef(
2061 *new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
2062 m_currentIndex = oldOffset;
2064 data.fallThrough = BranchTarget(slowPathBlock.ptr());
2065 m_graph.appendBlock(slowPathBlock.copyRef());
2067 dataLog("Marking ", RawPointer(slowPathBlock.ptr()), " as linked (slow path block)\n");
2068 slowPathBlock->didLink();
2069 prepareToParseBlock();
2070 m_currentBlock = slowPathBlock.ptr();
2071 Node* myCallTargetNode = getDirect(calleeReg);
2072 if (couldTakeSlowPath) {
2074 resultOperand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,
2075 registerOffset, prediction);
2077 addToGraph(CheckBadCell);
2078 addToGraph(Phantom, myCallTargetNode);
2079 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
2081 set(VirtualRegister(resultOperand), addToGraph(BottomValue));
2084 m_currentIndex = nextOffset;
2085 m_exitOK = true; // Origin changed, so it's fine to exit again.
2086 processSetLocalQueue();
2087 if (Node* terminal = m_currentBlock->terminal())
2088 ASSERT_UNUSED(terminal, terminal->op() == TailCall || terminal->op() == TailCallVarargs || terminal->op() == TailCallForwardVarargs);
2091 landingBlocks.append(m_currentBlock);
2094 Ref<BasicBlock> continuationBlock = adoptRef(
2095 *new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
2096 m_graph.appendBlock(continuationBlock.copyRef());
2098 dataLog("Adding unlinked block ", RawPointer(continuationBlock.ptr()), " (continuation)\n");
2099 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.ptr()));
2100 prepareToParseBlock();
2101 m_currentBlock = continuationBlock.ptr();
2103 for (unsigned i = landingBlocks.size(); i--;)
2104 landingBlocks[i]->terminal()->targetBlock() = continuationBlock.ptr();
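// Every landing block (one per successfully inlined callee, plus the slow path block) now
// jumps to the shared continuation block, and parsing of the caller resumes there.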
2106 m_currentIndex = oldOffset;
2110 dataLog("Done inlining (hard).\n");
2111 dataLog("Stack: ", currentCodeOrigin(), "\n");
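// handleMinMax() inlines Math.min/Math.max according to argument count:
//   Math.max()     => constant -Infinity (Math.min() => +Infinity)
//   Math.max(x)    => x, with a Phantom NumberUse check on x
//   Math.max(x, y) => ArithMax(x, y)
// Calls with more arguments fall back to the generic call path.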
2116 template<typename ChecksFunctor>
2117 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
2119 ASSERT(op == ArithMin || op == ArithMax);
2121 if (argumentCountIncludingThis == 1) {
2123 double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();
2124 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result)))));
2128 if (argumentCountIncludingThis == 2) {
2130 Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
2131 addToGraph(Phantom, Edge(result, NumberUse));
2132 set(VirtualRegister(resultOperand), result);
2136 if (argumentCountIncludingThis == 3) {
2138 set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
2142 // Don't handle >=3 arguments for now.
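// handleIntrinsicCall() tries to replace a call to a known intrinsic (Math.*, Array.prototype
// push/pop/slice, String and RegExp helpers, Map/Set accessors, etc.) with dedicated DFG nodes.
// Returning false means the caller should emit an ordinary call instead.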
2146 template<typename ChecksFunctor>
2147 bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2149 switch (intrinsic) {
2151 // Intrinsic Functions:
2153 case AbsIntrinsic: {
2154 if (argumentCountIncludingThis == 1) { // Math.abs()
2156 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2160 if (!MacroAssembler::supportsFloatingPointAbs())
2164 Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
2165 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
2166 node->mergeFlags(NodeMayOverflowInt32InDFG);
2167 set(VirtualRegister(resultOperand), node);
2172 return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
2175 return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
2178 case FRoundIntrinsic:
2182 case TanIntrinsic: {
2183 if (argumentCountIncludingThis == 1) {
2185 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2189 NodeType nodeType = Unreachable;
2190 switch (intrinsic) {
2192 nodeType = ArithCos;
2194 case FRoundIntrinsic:
2195 nodeType = ArithFRound;
2198 nodeType = ArithLog;
2201 nodeType = ArithSin;
2204 nodeType = ArithSqrt;
2207 nodeType = ArithTan;
2210 RELEASE_ASSERT_NOT_REACHED();
2213 set(VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));
2217 case PowIntrinsic: {
2218 if (argumentCountIncludingThis < 3) {
2219 // Math.pow() and Math.pow(x) return NaN.
2221 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2225 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
2226 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
2227 set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
2231 case ArrayPushIntrinsic: {
2232 if (argumentCountIncludingThis != 2)
2235 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2236 if (!arrayMode.isJSArray())
2238 switch (arrayMode.type()) {
2241 case Array::Contiguous:
2242 case Array::ArrayStorage: {
2244 Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2245 set(VirtualRegister(resultOperand), arrayPush);
2255 case ArraySliceIntrinsic: {
2256 #if USE(JSVALUE32_64)
2257 if (isX86() || isMIPS()) {
2258 // There aren't enough registers for this to be done easily.
2262 if (argumentCountIncludingThis < 2)
2265 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadConstantCache)
2266 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
2269 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2270 if (!arrayMode.isJSArray())
2273 if (arrayMode.arrayClass() != Array::OriginalArray)
2276 switch (arrayMode.type()) {
2279 case Array::Contiguous: {
2280 JSGlobalObject* globalObject = m_graph.globalObjectFor(currentNodeOrigin().semantic);
2282 InlineWatchpointSet& objectPrototypeTransition = globalObject->objectPrototype()->structure()->transitionWatchpointSet();
2283 InlineWatchpointSet& arrayPrototypeTransition = globalObject->arrayPrototype()->structure()->transitionWatchpointSet();
2285 // FIXME: We could easily relax the Array/Object.prototype transition as long as we OSR exited if we saw a hole.
2286 if (globalObject->arraySpeciesWatchpoint().state() == IsWatched
2287 && globalObject->havingABadTimeWatchpoint()->isStillValid()
2288 && arrayPrototypeTransition.isStillValid()
2289 && objectPrototypeTransition.isStillValid()
2290 && globalObject->arrayPrototypeChainIsSane()) {
2292 m_graph.watchpoints().addLazily(globalObject->arraySpeciesWatchpoint());
2293 m_graph.watchpoints().addLazily(globalObject->havingABadTimeWatchpoint());
2294 m_graph.watchpoints().addLazily(arrayPrototypeTransition);
2295 m_graph.watchpoints().addLazily(objectPrototypeTransition);
2299 Node* array = get(virtualRegisterForArgument(0, registerOffset));
2300 // We do a few things here to prove that we aren't skipping doing side-effects in an observable way:
2301 // 1. We ensure that the "constructor" property hasn't been changed (because the observable
2302 // effects of slice require that we perform a Get(array, "constructor") and we can skip
2303 // that if we're an original array structure). (We can relax this in the future by using
2304 // TryGetById and CheckCell).
2306 // 2. We check that the array we're calling slice on has the same global object as the lexical
2307 // global object that this code is running in. This requirement is necessary because we set up the
2308 // watchpoints above on the lexical global object. This means that code that calls slice on
2309 // arrays produced by other global objects won't get this optimization. We could relax this
2310 // requirement in the future by checking that the watchpoint hasn't fired at runtime in the code
2311 // we generate instead of registering it as a watchpoint that would invalidate the compilation.
2313 // 3. By proving we're an original array structure, we guarantee that the incoming array
2314 // isn't a subclass of Array.
2316 StructureSet structureSet;
2317 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithInt32));
2318 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithContiguous));
2319 structureSet.add(globalObject->originalArrayStructureForIndexingType(ArrayWithDouble));
2320 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structureSet)), array);
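// ArraySlice is a vararg node; its children are the array, the start index, the optional
// end index, and the array's butterfly.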
2322 addVarArgChild(array);
2323 addVarArgChild(get(virtualRegisterForArgument(1, registerOffset))); // Start index.
2324 if (argumentCountIncludingThis >= 3)
2325 addVarArgChild(get(virtualRegisterForArgument(2, registerOffset))); // End index.
2326 addVarArgChild(addToGraph(GetButterfly, array));
2328 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo());
2329 set(VirtualRegister(resultOperand), arraySlice);
2339 RELEASE_ASSERT_NOT_REACHED();
2343 case ArrayPopIntrinsic: {
2344 if (argumentCountIncludingThis != 1)
2347 ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
2348 if (!arrayMode.isJSArray())
2350 switch (arrayMode.type()) {
2353 case Array::Contiguous:
2354 case Array::ArrayStorage: {
2356 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
2357 set(VirtualRegister(resultOperand), arrayPop);
2366 case ParseIntIntrinsic: {
2367 if (argumentCountIncludingThis < 2)
2370 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell) || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2374 VirtualRegister valueOperand = virtualRegisterForArgument(1, registerOffset);
2376 if (argumentCountIncludingThis == 2)
2377 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand));
2379 ASSERT(argumentCountIncludingThis > 2);
2380 VirtualRegister radixOperand = virtualRegisterForArgument(2, registerOffset);
2381 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand));
2383 set(VirtualRegister(resultOperand), parseInt);
2387 case CharCodeAtIntrinsic: {
2388 if (argumentCountIncludingThis != 2)
2392 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2393 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2394 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2396 set(VirtualRegister(resultOperand), charCode);
2400 case CharAtIntrinsic: {
2401 if (argumentCountIncludingThis != 2)
2405 VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
2406 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2407 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2409 set(VirtualRegister(resultOperand), charCode);
2412 case Clz32Intrinsic: {
2414 if (argumentCountIncludingThis == 1)
2415 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2417 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2418 set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
2422 case FromCharCodeIntrinsic: {
2423 if (argumentCountIncludingThis != 2)
2427 VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2428 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2430 set(VirtualRegister(resultOperand), charCode);
2435 case RegExpExecIntrinsic: {
2436 if (argumentCountIncludingThis != 2)
2440 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2441 set(VirtualRegister(resultOperand), regExpExec);
2446 case RegExpTestIntrinsic:
2447 case RegExpTestFastIntrinsic: {
2448 if (argumentCountIncludingThis != 2)
2451 if (intrinsic == RegExpTestIntrinsic) {
2452 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2453 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2456 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2457 Structure* regExpStructure = globalObject->regExpStructure();
2458 m_graph.registerStructure(regExpStructure);
2459 ASSERT(regExpStructure->storedPrototype().isObject());
2460 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2462 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2463 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2465 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2466 JSValue currentProperty;
2467 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2470 return currentProperty == primordialProperty;
2473 // Check that RegExp.exec is still the primordial RegExp.prototype.exec
2474 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2477 // Check that regExpObject is actually a RegExp object.
2478 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2479 addToGraph(Check, Edge(regExpObject, RegExpObjectUse));
2481 // Check that regExpObject's exec is actually the primordial RegExp.prototype.exec.
2482 UniquedStringImpl* execPropertyID = m_vm->propertyNames->exec.impl();
2483 unsigned execIndex = m_graph.identifiers().ensure(execPropertyID);
2484 Node* actualProperty = addToGraph(TryGetById, OpInfo(execIndex), OpInfo(SpecFunction), Edge(regExpObject, CellUse));
2485 FrozenValue* regExpPrototypeExec = m_graph.freeze(globalObject->regExpProtoExecFunction());
2486 addToGraph(CheckCell, OpInfo(regExpPrototypeExec), Edge(actualProperty, CellUse));
2490 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset));
2491 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset)));
2492 set(VirtualRegister(resultOperand), regExpExec);
2497 case IsTypedArrayViewIntrinsic: {
2498 ASSERT(argumentCountIncludingThis == 2);
2501 set(VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
2505 case StringPrototypeReplaceIntrinsic: {
2506 if (argumentCountIncludingThis != 3)
2509 // Don't inline intrinsic if we exited due to "search" not being a RegExp or String object.
2510 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2513 // Don't inline intrinsic if we exited due to one of the primordial RegExp checks failing.
2514 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2517 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2518 Structure* regExpStructure = globalObject->regExpStructure();
2519 m_graph.registerStructure(regExpStructure);
2520 ASSERT(regExpStructure->storedPrototype().isObject());
2521 ASSERT(regExpStructure->storedPrototype().asCell()->classInfo(*m_vm) == RegExpPrototype::info());
2523 FrozenValue* regExpPrototypeObjectValue = m_graph.freeze(regExpStructure->storedPrototype());
2524 Structure* regExpPrototypeStructure = regExpPrototypeObjectValue->structure();
2526 auto isRegExpPropertySame = [&] (JSValue primordialProperty, UniquedStringImpl* propertyUID) {
2527 JSValue currentProperty;
2528 if (!m_graph.getRegExpPrototypeProperty(regExpStructure->storedPrototypeObject(), regExpPrototypeStructure, propertyUID, currentProperty))
2531 return currentProperty == primordialProperty;
2534 // Check that searchRegExp.exec is still the primordial RegExp.prototype.exec
2535 if (!isRegExpPropertySame(globalObject->regExpProtoExecFunction(), m_vm->propertyNames->exec.impl()))
2538 // Check that searchRegExp.global is still the primordial RegExp.prototype.global
2539 if (!isRegExpPropertySame(globalObject->regExpProtoGlobalGetter(), m_vm->propertyNames->global.impl()))
2542 // Check that searchRegExp.unicode is still the primordial RegExp.prototype.unicode
2543 if (!isRegExpPropertySame(globalObject->regExpProtoUnicodeGetter(), m_vm->propertyNames->unicode.impl()))
2546 // Check that searchRegExp[Symbol.replace] is still the primordial RegExp.prototype[Symbol.replace]
2547 if (!isRegExpPropertySame(globalObject->regExpProtoSymbolReplaceFunction(), m_vm->propertyNames->replaceSymbol.impl()))
2552 Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2553 set(VirtualRegister(resultOperand), result);
2557 case StringPrototypeReplaceRegExpIntrinsic: {
2558 if (argumentCountIncludingThis != 3)
2562 Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));
2563 set(VirtualRegister(resultOperand), result);
2567 case RoundIntrinsic:
2568 case FloorIntrinsic:
2570 case TruncIntrinsic: {
2571 if (argumentCountIncludingThis == 1) {
2573 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2577 Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2579 if (intrinsic == RoundIntrinsic)
2581 else if (intrinsic == FloorIntrinsic)
2583 else if (intrinsic == CeilIntrinsic)
2586 ASSERT(intrinsic == TruncIntrinsic);
2589 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand);
2590 set(VirtualRegister(resultOperand), roundNode);
2593 case IMulIntrinsic: {
2594 if (argumentCountIncludingThis != 3)
2597 VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2598 VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2599 Node* left = get(leftOperand);
2600 Node* right = get(rightOperand);
2601 set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
2605 case RandomIntrinsic: {
2606 if (argumentCountIncludingThis != 1)
2609 set(VirtualRegister(resultOperand), addToGraph(ArithRandom));
2613 case DFGTrueIntrinsic: {
2615 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2619 case OSRExitIntrinsic: {
2621 addToGraph(ForceOSRExit);
2622 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2626 case IsFinalTierIntrinsic: {
2628 set(VirtualRegister(resultOperand),
2629 jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
2633 case SetInt32HeapPredictionIntrinsic: {
2635 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2636 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2637 if (node->hasHeapPrediction())
2638 node->setHeapPrediction(SpecInt32Only);
2640 set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2644 case CheckInt32Intrinsic: {
2646 for (int i = 1; i < argumentCountIncludingThis; ++i) {
2647 Node* node = get(virtualRegisterForArgument(i, registerOffset));
2648 addToGraph(Phantom, Edge(node, Int32Use));
2650 set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2654 case FiatInt52Intrinsic: {
2655 if (argumentCountIncludingThis != 2)
2658 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2660 set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
2662 set(VirtualRegister(resultOperand), get(operand));
2666 case JSMapGetIntrinsic: {
2667 if (argumentCountIncludingThis != 2)
2671 Node* map = get(virtualRegisterForArgument(0, registerOffset));
2672 Node* key = get(virtualRegisterForArgument(1, registerOffset));
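// Lower map.get(key) into an explicit lookup: hash the key, find the bucket, then load the
// bucket's value (a missing key yields undefined, matching Map.prototype.get).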
2673 Node* hash = addToGraph(MapHash, key);
2674 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(key), Edge(hash));
2675 Node* result = addToGraph(LoadFromJSMapBucket, OpInfo(), OpInfo(prediction), bucket);
2676 set(VirtualRegister(resultOperand), result);
2680 case JSSetHasIntrinsic:
2681 case JSMapHasIntrinsic: {
2682 if (argumentCountIncludingThis != 2)
2686 Node* mapOrSet = get(virtualRegisterForArgument(0, registerOffset));
2687 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2688 Node* hash = addToGraph(MapHash, key);
2689 UseKind useKind = intrinsic == JSSetHasIntrinsic ? SetObjectUse : MapObjectUse;
2690 Node* bucket = addToGraph(GetMapBucket, OpInfo(0), Edge(mapOrSet, useKind), Edge(key), Edge(hash));
2691 Node* result = addToGraph(IsNonEmptyMapBucket, bucket);
2692 set(VirtualRegister(resultOperand), result);
2696 case HasOwnPropertyIntrinsic: {
2697 if (argumentCountIncludingThis != 2)
2700 // This can be racy; that's fine. We know that once we observe that this is created,
2701 // it will never be destroyed until the VM is destroyed. It's unlikely that
2702 // we'd ever get to the point where we inline this as an intrinsic without the
2703 // cache being created, however, it's possible if we always throw exceptions inside
2705 if (!m_vm->hasOwnPropertyCache())
2709 Node* object = get(virtualRegisterForArgument(0, registerOffset));
2710 Node* key = get(virtualRegisterForArgument(1, registerOffset));
2711 Node* result = addToGraph(HasOwnProperty, object, key);
2712 set(VirtualRegister(resultOperand), result);
2716 case StringPrototypeToLowerCaseIntrinsic: {
2717 if (argumentCountIncludingThis != 1)
2720 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2724 Node* thisString = get(virtualRegisterForArgument(0, registerOffset));
2725 Node* result = addToGraph(ToLowerCase, thisString);
2726 set(VirtualRegister(resultOperand), result);
2730 case NumberPrototypeToStringIntrinsic: {
2731 if (argumentCountIncludingThis != 1 && argumentCountIncludingThis != 2)
2734 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2738 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset));
2739 if (argumentCountIncludingThis == 1) {
2740 Node* result = addToGraph(ToString, thisNumber);
2741 set(VirtualRegister(resultOperand), result);
2743 Node* radix = get(virtualRegisterForArgument(1, registerOffset));
2744 Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix);
2745 set(VirtualRegister(resultOperand), result);
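// handleDOMJITCall() inlines a call to a DOM function that advertises a DOMJIT::Signature,
// provided the argument count matches the signature and we have not previously exited here
// with BadType.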
2755 template<typename ChecksFunctor>
2756 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2758 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount))
2760 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType))
2763 // FIXME: Currently, we only support functions that take up to 2 arguments.
2764 // Eventually, we should extend this. But 2 or 3 arguments probably cover the typical use cases.
2765 // https://bugs.webkit.org/show_bug.cgi?id=164346
2766 ASSERT_WITH_MESSAGE(argumentCountIncludingThis <= JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS, "Currently CallDOM does not support an arbitrary number of arguments.");
2769 addCall(resultOperand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);
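// handleIntrinsicGetter() inlines getters that have intrinsic implementations. The cases
// handled here are the typed array byteLength/length/byteOffset accessors, which become
// GetArrayLength and GetTypedArrayByteOffset nodes once every structure in the variant agrees
// on the array type.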
2774 template<typename ChecksFunctor>
2775 bool ByteCodeParser::handleIntrinsicGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)
2777 switch (variant.intrinsic()) {
2778 case TypedArrayByteLengthIntrinsic: {
2781 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
2782 Array::Type arrayType = toArrayType(type);
2783 size_t logSize = logElementSize(type);
2785 variant.structureSet().forEach([&] (Structure* structure) {
2786 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
2787 ASSERT(logSize == logElementSize(curType));
2788 arrayType = refineTypedArrayType(arrayType, curType);
2789 ASSERT(arrayType != Array::Generic);
2792 Node* lengthNode = addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode);
2795 set(VirtualRegister(resultOperand), lengthNode);
2799 // We can use a BitLShift here because typed arrays will never have a byteLength
2800 // that overflows int32.
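// For example, for a Float64Array logSize is 3, so byteLength == length << 3.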
2801 Node* shiftNode = jsConstant(jsNumber(logSize));
2802 set(VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode));
2807 case TypedArrayLengthIntrinsic: {
2810 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
2811 Array::Type arrayType = toArrayType(type);
2813 variant.structureSet().forEach([&] (Structure* structure) {
2814 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
2815 arrayType = refineTypedArrayType(arrayType, curType);
2816 ASSERT(arrayType != Array::Generic);
2819 set(VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType).asWord()), thisNode));
2825 case TypedArrayByteOffsetIntrinsic: {
2828 TypedArrayType type = (*variant.structureSet().begin())->classInfo()->typedArrayStorageType;
2829 Array::Type arrayType = toArrayType(type);
2831 variant.structureSet().forEach([&] (Structure* structure) {
2832 TypedArrayType curType = structure->classInfo()->typedArrayStorageType;
2833 arrayType = refineTypedArrayType(arrayType, curType);
2834 ASSERT(arrayType != Array::Generic);
2837 set(VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType).asWord()), thisNode));
2845 RELEASE_ASSERT_NOT_REACHED();
2848 static void blessCallDOMGetter(Node* node)
2850 DOMJIT::CallDOMGetterPatchpoint* patchpoint = node->callDOMGetterData()->patchpoint;
2851 if (!patchpoint->effect.mustGenerate())
2852 node->clearFlags(NodeMustGenerate);
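// handleDOMJITGetter() lowers a DOM attribute getter that has DOMJIT support into
// CheckStructure + CheckDOM + CallDOMGetter. No CheckCell on the accessor is needed because
// replacing a CustomGetterSetter forces a Structure transition.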
2855 bool ByteCodeParser::handleDOMJITGetter(int resultOperand, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)
2857 if (!variant.domJIT())
2860 DOMJIT::GetterSetter* domJIT = variant.domJIT();
2862 // We do not need to actually look up the CustomGetterSetter here. Checking Structures or registering watchpoints is enough,
2863 // since replacing a CustomGetterSetter always incurs a Structure transition.
2864 if (!check(variant.conditionSet()))
2866 addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), thisNode);
2868 Ref<DOMJIT::Patchpoint> checkDOMPatchpoint = domJIT->checkDOM();
2869 m_graph.m_domJITPatchpoints.append(checkDOMPatchpoint.ptr());
2870 // We do not need to emit a CheckCell here. When the custom accessor is replaced with a different one, a Structure transition occurs.
2871 addToGraph(CheckDOM, OpInfo(checkDOMPatchpoint.ptr()), OpInfo(domJIT->thisClassInfo()), thisNode);
2873 CallDOMGetterData* callDOMGetterData = m_graph.m_callDOMGetterData.add();
2874 Ref<DOMJIT::CallDOMGetterPatchpoint> callDOMGetterPatchpoint = domJIT->callDOMGetter();
2875 m_graph.m_domJITPatchpoints.append(callDOMGetterPatchpoint.ptr());
2877 callDOMGetterData->domJIT = domJIT;
2878 callDOMGetterData->patchpoint = callDOMGetterPatchpoint.ptr();
2879 callDOMGetterData->identifierNumber = identifierNumber;
2881 Node* callDOMGetterNode = nullptr;
2882 // The GlobalObject of thisNode is always used to create a DOMWrapper.
2883 if (callDOMGetterPatchpoint->requireGlobalObject) {
2884 Node* globalObject = addToGraph(GetGlobalObject, thisNode);
2885 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode, globalObject);
2887 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode);
2888 blessCallDOMGetter(callDOMGetterNode);
2889 set(VirtualRegister(resultOperand), callDOMGetterNode);
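// handleModuleNamespaceLoad() handles a get_by_id that is known to read from a module
// namespace object: it emits a CheckCell on the namespace object and then either folds the
// module variable to a constant or loads it with GetClosureVar from the module environment.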
2893 bool ByteCodeParser::handleModuleNamespaceLoad(int resultOperand, SpeculatedType prediction, Node* base, GetByIdStatus getById)
2895 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell))
2897 addToGraph(CheckCell, OpInfo(m_graph.freeze(getById.moduleNamespaceObject())), Edge(base, CellUse));
2899 // Ideally we wouldn't have to do this Phantom. But:
2901 // For the constant case: we must do it because otherwise we would have no way of knowing
2902 // that the scope is live at OSR here.
2904 // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
2905 // won't be able to handle an Undefined scope.
2906 addToGraph(Phantom, base);
2908 // Constant folding in the bytecode parser is important for performance. This may not
2909 // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
2910 // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
2911 // would recompile. But if we can fold it here, we avoid the exit.
2912 m_graph.freeze(getById.moduleEnvironment());
2913 if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) {
2914 set(VirtualRegister(resultOperand), weakJSConstant(value));
2917 set(VirtualRegister(resultOperand), addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));
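// handleTypedArrayConstructor() turns `new Int8Array(length)`-style constructions into a
// NewTypedArray node, but only when the callee is the matching typed array constructor from
// this code block's global object and exactly one argument is passed.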
2921 template<typename ChecksFunctor>
2922 bool ByteCodeParser::handleTypedArrayConstructor(
2923 int resultOperand, InternalFunction* function, int registerOffset,
2924 int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
2926 if (!isTypedView(type))
2929 if (function->classInfo() != constructorClassInfoForType(type))
2932 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2935 // We only have an intrinsic for the case where you say:
2937 // new FooArray(blah);
2939 // Of course, 'blah' could be any of the following:
2941 // - Integer, indicating that you want to allocate an array of that length.
2942 // This is the thing we're hoping for, and what we can actually do meaningful
2943 // optimizations for.
2945 // - Array buffer, indicating that you want to create a view onto that _entire_
2948 // - Non-buffer object, indicating that you want to create a copy of that
2949 // object by pretending that it quacks like an array.
2951 // - Anything else, indicating that you want to have an exception thrown at
2954 // The intrinsic, NewTypedArray, will behave as if it could do any of these
2955 // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
2956 // predicted Int32, then we lock it in as a normal typed array allocation.
2957 // Otherwise, NewTypedArray turns into a totally opaque function call that
2958 // may clobber the world - by virtue of it accessing properties on what could
2961 // Note that although the generic form of NewTypedArray sounds sort of awful,
2962 // it is actually quite likely to be more efficient than a fully generic
2963 // Construct. So, we might want to think about making NewTypedArray variadic,
2964 // or else making Construct not super slow.
2966 if (argumentCountIncludingThis != 2)
2969 if (!function->globalObject()->typedArrayStructureConcurrently(type))
2973 set(VirtualRegister(resultOperand),
2974 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
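// handleConstantInternalFunction() special-cases calls where the callee is a known internal
// function: the Array, Number, String, and Object constructors, plus the typed array
// constructors handled above.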
2978 template<typename ChecksFunctor>
2979 bool ByteCodeParser::handleConstantInternalFunction(
2980 Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset,
2981 int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks)
2984 dataLog(" Handling constant internal function ", JSValue(function), "\n");
2986 if (kind == CodeForConstruct) {
2987 Node* newTargetNode = get(virtualRegisterForArgument(0, registerOffset));
2988 // We cannot handle the case where new.target != callee (i.e. a construct from a super call) because we
2989 // don't know what the prototype of the constructed object will be.
2990 // FIXME: If we have inlined super calls up to the call site, however, we should be able to figure out the structure. https://bugs.webkit.org/show_bug.cgi?id=152700
2991 if (newTargetNode != callTargetNode)
2995 if (function->classInfo() == ArrayConstructor::info()) {
2996 if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
3000 if (argumentCountIncludingThis == 2) {
3001 set(VirtualRegister(resultOperand),
3002 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
3006 for (int i = 1; i < argumentCountIncludingThis; ++i)
3007 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
3008 set(VirtualRegister(resultOperand),
3009 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
3013 if (function->classInfo() == NumberConstructor::info()) {
3014 if (kind == CodeForConstruct)
3018 if (argumentCountIncludingThis <= 1)
3019 set(VirtualRegister(resultOperand), jsConstant(jsNumber(0)));
3021 set(VirtualRegister(resultOperand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));
3026 if (function->classInfo() == StringConstructor::info()) {
3031 if (argumentCountIncludingThis <= 1)
3032 result = jsConstant(m_vm->smallStrings.emptyString());
3034 result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
3036 if (kind == CodeForConstruct)
3037 result = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject()->stringObjectStructure())), result);
3039 set(VirtualRegister(resultOperand), result);
3043 // FIXME: This should handle construction as well. https://bugs.webkit.org/show_bug.cgi?id=155591
3044 if (function->classInfo() == ObjectConstructor::info() && kind == CodeForCall) {
3048 if (argumentCountIncludingThis <= 1)
3049 result = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject()->objectStructureForObjectConstructor())));
3051 result = addToGraph(CallObjectConstructor, get(virtualRegisterForArgument(1, registerOffset)));
3052 set(VirtualRegister(resultOperand), result);
3056 for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
3057 bool result = handleTypedArrayConstructor(
3058 resultOperand, function, registerOffset, argumentCountIncludingThis,
3059 indexToTypedArrayType(typeIndex), insertChecks);
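// handleGetByOffset() emits a property load at a known offset: inline properties are read
// directly off the base cell, while out-of-line properties go through GetButterfly first.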
3067 Node* ByteCodeParser::handleGetByOffset(
3068 SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset,
3069 const InferredType::Descriptor& inferredType, NodeType op)
3071 Node* propertyStorage;
3072 if (isInlineOffset(offset))
3073 propertyStorage = base;
3075 propertyStorage = addToGraph(GetButterfly, base);
3077 StorageAccessData* data = m_graph.m_storageAccessData.add();
3078 data->offset = offset;
3079 data->identifierNumber = identifierNumber;
3080 data->inferredType = inferredType;
3081 m_graph.registerInferredType(inferredType);
3083 Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
3088 Node* ByteCodeParser::handlePutByOffset(
3089 Node* base, unsigned identifier, PropertyOffset offset, const InferredType::Descriptor& inferredType,
3092 Node* propertyStorage;
3093 if (isInlineOffset(offset))
3094 propertyStorage = base;
3096 propertyStorage = addToGraph(GetButterfly, base);
3098 StorageAccessData* data = m_graph.m_storageAccessData.add();
3099 data->offset = offset;
3100 data->identifierNumber = identifier;
3101 data->inferredType = inferredType;
3102 m_graph.registerInferredType(inferredType);
3104 Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
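// check() attempts to prove an ObjectPropertyCondition: preferably by installing a watchpoint,
// otherwise by emitting a structure check against the object's current structure when that
// structure is enough to keep the condition valid.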
3109 bool ByteCodeParser::check(const ObjectPropertyCondition& condition)
3114 if (m_graph.watchCondition(condition))
3117 Structure* structure = condition.object()->structure();
3118 if (!condition.structureEnsuresValidity(structure))
3123 OpInfo(m_graph.addStructureSet(structure)),
3124 weakJSConstant(condition.object()));
3128 GetByOffsetMethod ByteCodeParser::promoteToConstant(GetByOffsetMethod method)
3130 if (method.kind() == GetByOffsetMethod::LoadFromPrototype
3131 && method.prototype()->structure()->dfgShouldWatch()) {
3132 if (JSValue constant = m_graph.tryGetConstantProperty(method.prototype()->value(), method.prototype()->structure(), method.offset()))
3133 return GetByOffsetMethod::constant(m_graph.freeze(constant));
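// needsDynamicLookup() decides whether a scope access (resolve_scope / get_from_scope /
// put_to_scope) has to stay fully dynamic, e.g. because var injection has been observed or
// the resolve type is still unresolved.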
3139 bool ByteCodeParser::needsDynamicLookup(ResolveType type, OpcodeID opcode)
3141 ASSERT(opcode == op_resolve_scope || opcode == op_get_from_scope || opcode == op_put_to_scope);
3143 JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3144 if (needsVarInjectionChecks(type) && globalObject->varInjectionWatchpoint()->hasBeenInvalidated())
3148 case GlobalProperty:
3150 case GlobalLexicalVar:
3152 case LocalClosureVar:
3156 case UnresolvedProperty:
3157 case UnresolvedPropertyWithVarInjectionChecks: {
3158 // The heuristic for UnresolvedProperty scope accesses is that we will ForceOSRExit if we
3159 // haven't exited from this access before, to let the