Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "BasicBlockLocation.h"
33 #include "CallLinkStatus.h"
34 #include "CodeBlock.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGArrayMode.h"
37 #include "DFGCapabilities.h"
38 #include "DFGGraph.h"
39 #include "DFGJITCode.h"
40 #include "GetByIdStatus.h"
41 #include "Heap.h"
42 #include "JSLexicalEnvironment.h"
43 #include "JSCInlines.h"
44 #include "PreciseJumpTargets.h"
45 #include "PutByIdStatus.h"
46 #include "StackAlignment.h"
47 #include "StringConstructor.h"
48 #include <wtf/CommaPrinter.h>
49 #include <wtf/HashMap.h>
50 #include <wtf/MathExtras.h>
51 #include <wtf/StdLibExtras.h>
52
53 namespace JSC { namespace DFG {
54
55 static const bool verbose = false;
56
57 class ConstantBufferKey {
58 public:
59     ConstantBufferKey()
60         : m_codeBlock(0)
61         , m_index(0)
62     {
63     }
64     
65     ConstantBufferKey(WTF::HashTableDeletedValueType)
66         : m_codeBlock(0)
67         , m_index(1)
68     {
69     }
70     
71     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
72         : m_codeBlock(codeBlock)
73         , m_index(index)
74     {
75     }
76     
77     bool operator==(const ConstantBufferKey& other) const
78     {
79         return m_codeBlock == other.m_codeBlock
80             && m_index == other.m_index;
81     }
82     
83     unsigned hash() const
84     {
85         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
86     }
87     
88     bool isHashTableDeletedValue() const
89     {
90         return !m_codeBlock && m_index;
91     }
92     
93     CodeBlock* codeBlock() const { return m_codeBlock; }
94     unsigned index() const { return m_index; }
95     
96 private:
97     CodeBlock* m_codeBlock;
98     unsigned m_index;
99 };
100
101 struct ConstantBufferKeyHash {
102     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
103     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
104     {
105         return a == b;
106     }
107     
108     static const bool safeToCompareToEmptyOrDeleted = true;
109 };
110
111 } } // namespace JSC::DFG
112
113 namespace WTF {
114
115 template<typename T> struct DefaultHash;
116 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
117     typedef JSC::DFG::ConstantBufferKeyHash Hash;
118 };
119
120 template<typename T> struct HashTraits;
121 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
122
123 } // namespace WTF
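
// Illustrative sketch (not part of the original source): with the DefaultHash and
// HashTraits specializations above, ConstantBufferKey can be used directly as a key
// type in WTF::HashMap, as m_constantBufferCache does below. Assuming a CodeBlock*
// 'codeBlock', a constant-buffer index 'index', and a remapped index 'remappedIndex':
//
//     HashMap<ConstantBufferKey, unsigned> cache;
//     cache.add(ConstantBufferKey(codeBlock, index), remappedIndex);
//     unsigned found = cache.get(ConstantBufferKey(codeBlock, index)); // 0 if absent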
124
125 namespace JSC { namespace DFG {
126
127 // === ByteCodeParser ===
128 //
129 // This class is used to compile the dataflow graph from a CodeBlock.
130 class ByteCodeParser {
131 public:
132     ByteCodeParser(Graph& graph)
133         : m_vm(&graph.m_vm)
134         , m_codeBlock(graph.m_codeBlock)
135         , m_profiledBlock(graph.m_profiledBlock)
136         , m_graph(graph)
137         , m_currentBlock(0)
138         , m_currentIndex(0)
139         , m_constantUndefined(graph.freeze(jsUndefined()))
140         , m_constantNull(graph.freeze(jsNull()))
141         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
142         , m_constantOne(graph.freeze(jsNumber(1)))
143         , m_numArguments(m_codeBlock->numParameters())
144         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
145         , m_parameterSlots(0)
146         , m_numPassedVarArgs(0)
147         , m_inlineStackTop(0)
148         , m_haveBuiltOperandMaps(false)
149         , m_currentInstruction(0)
150         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
151     {
152         ASSERT(m_profiledBlock);
153     }
154     
155     // Parse a full CodeBlock of bytecode.
156     bool parse();
157     
158 private:
159     struct InlineStackEntry;
160
161     // Just parse from m_currentIndex to the end of the current CodeBlock.
162     void parseCodeBlock();
163     
164     void ensureLocals(unsigned newNumLocals)
165     {
166         if (newNumLocals <= m_numLocals)
167             return;
168         m_numLocals = newNumLocals;
169         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
170             m_graph.block(i)->ensureLocals(newNumLocals);
171     }
172
173     // Helper for min and max.
174     template<typename ChecksFunctor>
175     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
176     
177     // Handle calls. This resolves issues surrounding inlining and intrinsics.
178     void handleCall(
179         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
180         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
181         SpeculatedType prediction);
182     void handleCall(
183         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
184         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
185     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
186     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
187     void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
188     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
189     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
190     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
191     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
192     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
193     enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
194     template<typename ChecksFunctor>
195     bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
196     template<typename ChecksFunctor>
197     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
198     void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
199     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
200     template<typename ChecksFunctor>
201     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
202     template<typename ChecksFunctor>
203     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
204     template<typename ChecksFunctor>
205     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
206     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
207     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
208     void handleGetById(
209         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
210         const GetByIdStatus&);
211     void emitPutById(
212         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
213     void handlePutById(
214         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
215         bool isDirect);
216     void emitChecks(const ConstantStructureCheckVector&);
217
218     void prepareToParseBlock();
219     void clearCaches();
220
221     // Parse a single basic block of bytecode instructions.
222     bool parseBlock(unsigned limit);
223     // Link block successors.
224     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
225     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
226     
227     VariableAccessData* newVariableAccessData(VirtualRegister operand)
228     {
229         ASSERT(!operand.isConstant());
230         
231         m_graph.m_variableAccessData.append(VariableAccessData(operand));
232         return &m_graph.m_variableAccessData.last();
233     }
234     
235     // Get/Set the operands/result of a bytecode instruction.
236     Node* getDirect(VirtualRegister operand)
237     {
238         ASSERT(!operand.isConstant());
239
240         // Is this an argument?
241         if (operand.isArgument())
242             return getArgument(operand);
243
244         // Must be a local.
245         return getLocal(operand);
246     }
247
248     Node* get(VirtualRegister operand)
249     {
250         if (operand.isConstant()) {
251             unsigned constantIndex = operand.toConstantIndex();
252             unsigned oldSize = m_constants.size();
253             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
254                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
255                 JSValue value = codeBlock.getConstant(operand.offset());
256                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
257                 if (constantIndex >= oldSize) {
258                     m_constants.grow(constantIndex + 1);
259                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
260                         m_constants[i] = nullptr;
261                 }
262
263                 Node* constantNode = nullptr;
264                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
265                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
266                 else
267                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
268                 m_constants[constantIndex] = constantNode;
269             }
270             ASSERT(m_constants[constantIndex]);
271             return m_constants[constantIndex];
272         }
273         
274         if (inlineCallFrame()) {
275             if (!inlineCallFrame()->isClosureCall) {
276                 JSFunction* callee = inlineCallFrame()->calleeConstant();
277                 if (operand.offset() == JSStack::Callee)
278                     return weakJSConstant(callee);
279             }
280         } else if (operand.offset() == JSStack::Callee) {
281             // We have to do some constant-folding here because this enables CreateThis folding. Note
282             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
283             // case if the function is a singleton then we already know it.
284             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
285                 InferredValue* singleton = executable->singletonFunction();
286                 if (JSValue value = singleton->inferredValue()) {
287                     m_graph.watchpoints().addLazily(singleton);
288                     JSFunction* function = jsCast<JSFunction*>(value);
289                     return weakJSConstant(function);
290                 }
291             }
292             return addToGraph(GetCallee);
293         }
294         
295         return getDirect(m_inlineStackTop->remapOperand(operand));
296     }
297     
298     enum SetMode {
299         // A normal set which follows a two-phase commit that spans code origins. During
300         // the current code origin it issues a MovHint, and at the start of the next
301         // code origin there will be a SetLocal. If the local needs flushing, the second
302         // SetLocal will be preceded with a Flush.
303         NormalSet,
304         
305         // A set where the SetLocal happens immediately and there is still a Flush. This
306         // is relevant when assigning to a local in tricky situations for the delayed
307         // SetLocal logic but where we know that we have not performed any side effects
308         // within this code origin. This is a safe replacement for NormalSet anytime we
309         // know that we have not yet performed side effects in this code origin.
310         ImmediateSetWithFlush,
311         
312         // A set where the SetLocal happens immediately and we do not Flush it even if
313         // this is a local that is marked as needing it. This is relevant when
314         // initializing locals at the top of a function.
315         ImmediateNakedSet
316     };
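
    // Illustrative sketch (not part of the original source): a NormalSet from a
    // bytecode op is queued and only becomes a SetLocal when the queue is drained at
    // the next instruction boundary, while the Immediate modes plant the SetLocal
    // right away. Roughly (operand and value names are placeholders):
    //
    //     set(bytecodeDst, value);                       // MovHint now, SetLocal delayed
    //     processSetLocalQueue();                        // emits the delayed SetLocal(s)
    //     setDirect(localReg, value, ImmediateNakedSet); // SetLocal now, no Flush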
317     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
318     {
319         addToGraph(MovHint, OpInfo(operand.offset()), value);
320
321         DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
322         
323         if (setMode == NormalSet) {
324             m_setLocalQueue.append(delayed);
325             return 0;
326         }
327         
328         return delayed.execute(this, setMode);
329     }
330     
331     void processSetLocalQueue()
332     {
333         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
334             m_setLocalQueue[i].execute(this);
335         m_setLocalQueue.resize(0);
336     }
337
338     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
339     {
340         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
341     }
342     
343     Node* injectLazyOperandSpeculation(Node* node)
344     {
345         ASSERT(node->op() == GetLocal);
346         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
347         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
348         LazyOperandValueProfileKey key(m_currentIndex, node->local());
349         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
350         node->variableAccessData()->predict(prediction);
351         return node;
352     }
353
354     // Used in implementing get/set, above, where the operand is a local variable.
355     Node* getLocal(VirtualRegister operand)
356     {
357         unsigned local = operand.toLocal();
358
359         Node* node = m_currentBlock->variablesAtTail.local(local);
360         
361         // This has two goals: 1) link together variable access data, and 2)
362         // try to avoid creating redundant GetLocals. (1) is required for
363         // correctness - no other phase will ensure that block-local variable
364         // access data unification is done correctly. (2) is purely opportunistic
365         // and is meant as a compile-time optimization only.
366         
367         VariableAccessData* variable;
368         
369         if (node) {
370             variable = node->variableAccessData();
371             
372             switch (node->op()) {
373             case GetLocal:
374                 return node;
375             case SetLocal:
376                 return node->child1().node();
377             default:
378                 break;
379             }
380         } else
381             variable = newVariableAccessData(operand);
382         
383         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
384         m_currentBlock->variablesAtTail.local(local) = node;
385         return node;
386     }
387
388     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
389     {
390         CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
391         m_currentSemanticOrigin = semanticOrigin;
392
393         unsigned local = operand.toLocal();
394         
395         if (setMode != ImmediateNakedSet) {
396             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
397             if (argumentPosition)
398                 flushDirect(operand, argumentPosition);
399             else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
400                 flush(operand);
401         }
402
403         VariableAccessData* variableAccessData = newVariableAccessData(operand);
404         variableAccessData->mergeStructureCheckHoistingFailed(
405             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
406         variableAccessData->mergeCheckArrayHoistingFailed(
407             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
408         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
409         m_currentBlock->variablesAtTail.local(local) = node;
410
411         m_currentSemanticOrigin = oldSemanticOrigin;
412         return node;
413     }
414
415     // Used in implementing get/set, above, where the operand is an argument.
416     Node* getArgument(VirtualRegister operand)
417     {
418         unsigned argument = operand.toArgument();
419         ASSERT(argument < m_numArguments);
420         
421         Node* node = m_currentBlock->variablesAtTail.argument(argument);
422
423         VariableAccessData* variable;
424         
425         if (node) {
426             variable = node->variableAccessData();
427             
428             switch (node->op()) {
429             case GetLocal:
430                 return node;
431             case SetLocal:
432                 return node->child1().node();
433             default:
434                 break;
435             }
436         } else
437             variable = newVariableAccessData(operand);
438         
439         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
440         m_currentBlock->variablesAtTail.argument(argument) = node;
441         return node;
442     }
443     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
444     {
445         CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
446         m_currentSemanticOrigin = semanticOrigin;
447
448         unsigned argument = operand.toArgument();
449         ASSERT(argument < m_numArguments);
450         
451         VariableAccessData* variableAccessData = newVariableAccessData(operand);
452
453         // Always flush arguments, except for 'this'. If 'this' is created by us,
454         // then make sure that it's never unboxed.
455         if (argument) {
456             if (setMode != ImmediateNakedSet)
457                 flushDirect(operand);
458         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
459             variableAccessData->mergeShouldNeverUnbox(true);
460         
461         variableAccessData->mergeStructureCheckHoistingFailed(
462             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
463         variableAccessData->mergeCheckArrayHoistingFailed(
464             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
465         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
466         m_currentBlock->variablesAtTail.argument(argument) = node;
467
468         m_currentSemanticOrigin = oldSemanticOrigin;
469         return node;
470     }
471     
472     ArgumentPosition* findArgumentPositionForArgument(int argument)
473     {
474         InlineStackEntry* stack = m_inlineStackTop;
475         while (stack->m_inlineCallFrame)
476             stack = stack->m_caller;
477         return stack->m_argumentPositions[argument];
478     }
479     
480     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
481     {
482         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
483             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
484             if (!inlineCallFrame)
485                 break;
486             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
487                 continue;
488             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
489                 continue;
490             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
491                 continue;
492             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
493             return stack->m_argumentPositions[argument];
494         }
495         return 0;
496     }
497     
498     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
499     {
500         if (operand.isArgument())
501             return findArgumentPositionForArgument(operand.toArgument());
502         return findArgumentPositionForLocal(operand);
503     }
504
505     void flush(VirtualRegister operand)
506     {
507         flushDirect(m_inlineStackTop->remapOperand(operand));
508     }
509     
510     void flushDirect(VirtualRegister operand)
511     {
512         flushDirect(operand, findArgumentPosition(operand));
513     }
514     
515     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
516     {
517         ASSERT(!operand.isConstant());
518         
519         Node* node = m_currentBlock->variablesAtTail.operand(operand);
520         
521         VariableAccessData* variable;
522         
523         if (node)
524             variable = node->variableAccessData();
525         else
526             variable = newVariableAccessData(operand);
527         
528         node = addToGraph(Flush, OpInfo(variable));
529         m_currentBlock->variablesAtTail.operand(operand) = node;
530         if (argumentPosition)
531             argumentPosition->addVariable(variable);
532     }
533     
534     void flush(InlineStackEntry* inlineStackEntry)
535     {
536         int numArguments;
537         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
538             ASSERT(!m_hasDebuggerEnabled);
539             numArguments = inlineCallFrame->arguments.size();
540             if (inlineCallFrame->isClosureCall)
541                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
542             if (inlineCallFrame->isVarargs())
543                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
544         } else
545             numArguments = inlineStackEntry->m_codeBlock->numParameters();
546         for (unsigned argument = numArguments; argument-- > 1;)
547             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
548         if (m_hasDebuggerEnabled)
549             flush(m_codeBlock->scopeRegister());
550     }
551
552     void flushForTerminal()
553     {
554         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
555             flush(inlineStackEntry);
556     }
557
558     void flushForReturn()
559     {
560         flush(m_inlineStackTop);
561     }
562     
563     void flushIfTerminal(SwitchData& data)
564     {
565         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
566             return;
567         
568         for (unsigned i = data.cases.size(); i--;) {
569             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
570                 return;
571         }
572         
573         flushForTerminal();
574     }
575
576     // Assumes that the constant should be strongly marked.
577     Node* jsConstant(JSValue constantValue)
578     {
579         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
580     }
581
582     Node* weakJSConstant(JSValue constantValue)
583     {
584         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
585     }
586
587     // Helper functions to get/set the this value.
588     Node* getThis()
589     {
590         return get(m_inlineStackTop->m_codeBlock->thisRegister());
591     }
592
593     void setThis(Node* value)
594     {
595         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
596     }
597
598     InlineCallFrame* inlineCallFrame()
599     {
600         return m_inlineStackTop->m_inlineCallFrame;
601     }
602
603     CodeOrigin currentCodeOrigin()
604     {
605         return CodeOrigin(m_currentIndex, inlineCallFrame());
606     }
607
608     NodeOrigin currentNodeOrigin()
609     {
610         // FIXME: We should set the forExit origin only on those nodes that can exit.
611         // https://bugs.webkit.org/show_bug.cgi?id=145204
612         if (m_currentSemanticOrigin.isSet())
613             return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin());
614         return NodeOrigin(currentCodeOrigin());
615     }
616     
617     BranchData* branchData(unsigned taken, unsigned notTaken)
618     {
619         // We assume that branches originating from bytecode always have a fall-through. We
620         // use this assumption to avoid checking for the creation of terminal blocks.
621         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
622         BranchData* data = m_graph.m_branchData.add();
623         *data = BranchData::withBytecodeIndices(taken, notTaken);
624         return data;
625     }
626     
627     Node* addToGraph(Node* node)
628     {
629         if (Options::verboseDFGByteCodeParsing())
630             dataLog("        appended ", node, " ", Graph::opName(node->op()), "\n");
631         m_currentBlock->append(node);
632         return node;
633     }
634     
635     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
636     {
637         Node* result = m_graph.addNode(
638             SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
639             Edge(child3));
640         return addToGraph(result);
641     }
642     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
643     {
644         Node* result = m_graph.addNode(
645             SpecNone, op, currentNodeOrigin(), child1, child2, child3);
646         return addToGraph(result);
647     }
648     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
649     {
650         Node* result = m_graph.addNode(
651             SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
652             Edge(child3));
653         return addToGraph(result);
654     }
655     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
656     {
657         Node* result = m_graph.addNode(
658             SpecNone, op, currentNodeOrigin(), info1, info2,
659             Edge(child1), Edge(child2), Edge(child3));
660         return addToGraph(result);
661     }
662     
663     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
664     {
665         Node* result = m_graph.addNode(
666             SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
667             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
668         addToGraph(result);
669         
670         m_numPassedVarArgs = 0;
671         
672         return result;
673     }
674     
675     void addVarArgChild(Node* child)
676     {
677         m_graph.m_varArgChildren.append(Edge(child));
678         m_numPassedVarArgs++;
679     }
680     
681     Node* addCallWithoutSettingResult(
682         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
683         SpeculatedType prediction)
684     {
685         addVarArgChild(callee);
686         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
687         if (parameterSlots > m_parameterSlots)
688             m_parameterSlots = parameterSlots;
689
690         for (int i = 0; i < argCount; ++i)
691             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
692
693         return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
694     }
695     
696     Node* addCall(
697         int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
698         SpeculatedType prediction)
699     {
700         Node* call = addCallWithoutSettingResult(
701             op, opInfo, callee, argCount, registerOffset, prediction);
702         VirtualRegister resultReg(result);
703         if (resultReg.isValid())
704             set(resultReg, call);
705         return call;
706     }
707     
708     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
709     {
710         Node* objectNode = weakJSConstant(object);
711         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
712         return objectNode;
713     }
714     
715     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
716     {
717         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
718         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
719     }
720
721     SpeculatedType getPrediction(unsigned bytecodeIndex)
722     {
723         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
724         
725         if (prediction == SpecNone) {
726             // We have no information about what values this node generates. Give up
727             // on executing this code, since we're likely to do more damage than good.
728             addToGraph(ForceOSRExit);
729         }
730         
731         return prediction;
732     }
733     
734     SpeculatedType getPredictionWithoutOSRExit()
735     {
736         return getPredictionWithoutOSRExit(m_currentIndex);
737     }
738     
739     SpeculatedType getPrediction()
740     {
741         return getPrediction(m_currentIndex);
742     }
743     
744     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
745     {
746         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
747         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
748         bool makeSafe = profile->outOfBounds(locker);
749         return ArrayMode::fromObserved(locker, profile, action, makeSafe);
750     }
751     
752     ArrayMode getArrayMode(ArrayProfile* profile)
753     {
754         return getArrayMode(profile, Array::Read);
755     }
756     
757     Node* makeSafe(Node* node)
758     {
759         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
760             node->mergeFlags(NodeMayOverflowInDFG);
761         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
762             node->mergeFlags(NodeMayNegZeroInDFG);
763         
764         if (!isX86() && node->op() == ArithMod)
765             return node;
766
767         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
768             return node;
769         
770         switch (node->op()) {
771         case UInt32ToNumber:
772         case ArithAdd:
773         case ArithSub:
774         case ValueAdd:
775         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
776             node->mergeFlags(NodeMayOverflowInBaseline);
777             break;
778             
779         case ArithNegate:
780             // Currently we can't tell the difference between a negation overflowing
781             // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
782             // path then we assume that it did both of those things.
783             node->mergeFlags(NodeMayOverflowInBaseline);
784             node->mergeFlags(NodeMayNegZeroInBaseline);
785             break;
786
787         case ArithMul:
788             // FIXME: We should detect cases where we only overflowed but never created
789             // negative zero.
790             // https://bugs.webkit.org/show_bug.cgi?id=132470
791             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
792                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
793                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
794             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
795                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
796                 node->mergeFlags(NodeMayNegZeroInBaseline);
797             break;
798             
799         default:
800             RELEASE_ASSERT_NOT_REACHED();
801             break;
802         }
803         
804         return node;
805     }
806     
807     Node* makeDivSafe(Node* node)
808     {
809         ASSERT(node->op() == ArithDiv);
810         
811         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
812             node->mergeFlags(NodeMayOverflowInDFG);
813         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
814             node->mergeFlags(NodeMayNegZeroInDFG);
815         
816         // The main slow case counter for op_div in the old JIT counts only when
817         // the operands are not numbers. We don't care about that since we already
818         // have speculations in place that take care of that separately. We only
819         // care about when the outcome of the division is not an integer, which
820         // is what the special fast case counter tells us.
821         
822         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
823             return node;
824         
825         // FIXME: It might be possible to make this more granular.
826         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
827         
828         return node;
829     }
830     
831     void noticeArgumentsUse()
832     {
833         // All of the arguments in this function need to be formatted as JSValues because we will
834         // load from them in a random-access fashion and we don't want to have to switch on
835         // format.
836         
837         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
838             argument->mergeShouldNeverUnbox(true);
839     }
840     
841     void buildOperandMapsIfNecessary();
842     
843     VM* m_vm;
844     CodeBlock* m_codeBlock;
845     CodeBlock* m_profiledBlock;
846     Graph& m_graph;
847
848     // The current block being generated.
849     BasicBlock* m_currentBlock;
850     // The bytecode index of the current instruction being generated.
851     unsigned m_currentIndex;
852     // The semantic origin of the current node if different from the current index.
853     CodeOrigin m_currentSemanticOrigin;
854
855     FrozenValue* m_constantUndefined;
856     FrozenValue* m_constantNull;
857     FrozenValue* m_constantNaN;
858     FrozenValue* m_constantOne;
859     Vector<Node*, 16> m_constants;
860
861     // The number of arguments passed to the function.
862     unsigned m_numArguments;
863     // The number of locals (vars + temporaries) used in the function.
864     unsigned m_numLocals;
865     // The number of slots (in units of sizeof(Register)) that we need to
866     // preallocate for arguments to outgoing calls from this frame. This
867     // number includes the CallFrame slots that we initialize for the callee
868     // (but not the callee-initialized CallerFrame and ReturnPC slots).
869     // This number is 0 if and only if this function is a leaf.
870     unsigned m_parameterSlots;
871     // The number of var args passed to the next var arg node.
872     unsigned m_numPassedVarArgs;
873
874     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
875     
876     struct InlineStackEntry {
877         ByteCodeParser* m_byteCodeParser;
878         
879         CodeBlock* m_codeBlock;
880         CodeBlock* m_profiledBlock;
881         InlineCallFrame* m_inlineCallFrame;
882         
883         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
884         
885         QueryableExitProfile m_exitProfile;
886         
887         // Remapping of identifier and constant numbers from the code block being
888         // inlined (inline callee) to the code block that we're inlining into
889         // (the machine code block, which is the transitive, though not necessarily
890         // direct, caller).
891         Vector<unsigned> m_identifierRemap;
892         Vector<unsigned> m_constantBufferRemap;
893         Vector<unsigned> m_switchRemap;
894         
895         // Blocks introduced by this code block, which need successor linking.
896         // May include up to one basic block that includes the continuation after
897         // the callsite in the caller. These must be appended in the order that they
898         // are created, but their bytecodeBegin values need not be in order as they
899         // are ignored.
900         Vector<UnlinkedBlock> m_unlinkedBlocks;
901         
902         // Potential block linking targets. Must be sorted by bytecodeBegin, and
903         // cannot have two blocks that have the same bytecodeBegin.
904         Vector<BasicBlock*> m_blockLinkingTargets;
905         
906         // If the callsite's basic block was split into two, then this will be
907         // the head of the callsite block. It needs its successors linked to the
908         // m_unlinkedBlocks, but not the other way around: there's no way for
909         // any blocks in m_unlinkedBlocks to jump back into this block.
910         BasicBlock* m_callsiteBlockHead;
911         
912         // Does the callsite block head need linking? This is typically true
913         // but will be false for the machine code block's inline stack entry
914         // (since that one is not inlined) and for cases where an inline callee
915         // did the linking for us.
916         bool m_callsiteBlockHeadNeedsLinking;
917         
918         VirtualRegister m_returnValue;
919         
920         // Speculations about variable types collected from the profiled code block,
921         // which are based on OSR exit profiles that past DFG compilations of this
922         // code block had gathered.
923         LazyOperandValueProfileParser m_lazyOperands;
924         
925         CallLinkInfoMap m_callLinkInfos;
926         StubInfoMap m_stubInfos;
927         
928         // Did we see any returns? We need to handle the (uncommon but necessary)
929         // case where a procedure that does not return was inlined.
930         bool m_didReturn;
931         
932         // Did we have any early returns?
933         bool m_didEarlyReturn;
934         
935         // Pointers to the argument position trackers for this slice of code.
936         Vector<ArgumentPosition*> m_argumentPositions;
937         
938         InlineStackEntry* m_caller;
939         
940         InlineStackEntry(
941             ByteCodeParser*,
942             CodeBlock*,
943             CodeBlock* profiledBlock,
944             BasicBlock* callsiteBlockHead,
945             JSFunction* callee, // Null if this is a closure call.
946             VirtualRegister returnValueVR,
947             VirtualRegister inlineCallFrameStart,
948             int argumentCountIncludingThis,
949             InlineCallFrame::Kind);
950         
951         ~InlineStackEntry()
952         {
953             m_byteCodeParser->m_inlineStackTop = m_caller;
954         }
955         
956         VirtualRegister remapOperand(VirtualRegister operand) const
957         {
958             if (!m_inlineCallFrame)
959                 return operand;
960             
961             ASSERT(!operand.isConstant());
962
963             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
964         }
965     };
966     
967     InlineStackEntry* m_inlineStackTop;
968     
969     struct DelayedSetLocal {
970         CodeOrigin m_origin;
971         VirtualRegister m_operand;
972         Node* m_value;
973         
974         DelayedSetLocal() { }
975         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
976             : m_origin(origin)
977             , m_operand(operand)
978             , m_value(value)
979         {
980         }
981         
982         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
983         {
984             if (m_operand.isArgument())
985                 return parser->setArgument(m_origin, m_operand, m_value, setMode);
986             return parser->setLocal(m_origin, m_operand, m_value, setMode);
987         }
988     };
989     
990     Vector<DelayedSetLocal, 2> m_setLocalQueue;
991
992     // Have we built operand maps? We initialize them lazily, and only when doing
993     // inlining.
994     bool m_haveBuiltOperandMaps;
995     // Mapping between identifier names and numbers.
996     BorrowedIdentifierMap m_identifierMap;
997     
998     CodeBlock* m_dfgCodeBlock;
999     CallLinkStatus::ContextMap m_callContextMap;
1000     StubInfoMap m_dfgStubInfos;
1001     
1002     Instruction* m_currentInstruction;
1003     bool m_hasDebuggerEnabled;
1004 };
1005
1006 #define NEXT_OPCODE(name) \
1007     m_currentIndex += OPCODE_LENGTH(name); \
1008     continue
1009
1010 #define LAST_OPCODE(name) \
1011     m_currentIndex += OPCODE_LENGTH(name); \
1012     return shouldContinueParsing
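
// Illustrative sketch (not part of the original source): these macros are used at the
// end of each case in parseBlock()'s opcode switch, e.g. (hypothetical, simplified
// handler):
//
//     case op_mov: {
//         Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
//         set(VirtualRegister(currentInstruction[1].u.operand), op);
//         NEXT_OPCODE(op_mov);
//     }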
1013
1014 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1015 {
1016     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1017     handleCall(
1018         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1019         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
1020 }
1021
1022 void ByteCodeParser::handleCall(
1023     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1024     int callee, int argumentCountIncludingThis, int registerOffset)
1025 {
1026     Node* callTarget = get(VirtualRegister(callee));
1027     
1028     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1029         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1030         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1031     
1032     handleCall(
1033         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1034         argumentCountIncludingThis, registerOffset, callLinkStatus);
1035 }
1036     
1037 void ByteCodeParser::handleCall(
1038     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1039     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1040     CallLinkStatus callLinkStatus)
1041 {
1042     handleCall(
1043         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1044         registerOffset, callLinkStatus, getPrediction());
1045 }
1046
1047 void ByteCodeParser::handleCall(
1048     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1049     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1050     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1051 {
1052     ASSERT(registerOffset <= 0);
1053     
1054     if (callTarget->isCellConstant())
1055         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1056     
1057     if (Options::verboseDFGByteCodeParsing())
1058         dataLog("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1059     
1060     if (!callLinkStatus.canOptimize()) {
1061         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1062         // that we cannot optimize them.
1063         
1064         addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1065         return;
1066     }
1067     
1068     unsigned nextOffset = m_currentIndex + instructionSize;
1069     
1070     OpInfo callOpInfo;
1071     
1072     if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1073         if (m_graph.compilation())
1074             m_graph.compilation()->noticeInlinedCall();
1075         return;
1076     }
1077     
1078 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1079     if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1080         CallVariant callee = callLinkStatus[0];
1081         JSFunction* function = callee.function();
1082         CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1083         if (function && function->isHostFunction()) {
1084             emitFunctionChecks(callee, callTarget, virtualRegisterForArgument(0, registerOffset));
1085             callOpInfo = OpInfo(m_graph.freeze(function));
1086
1087             if (op == Call)
1088                 op = NativeCall;
1089             else {
1090                 ASSERT(op == Construct);
1091                 op = NativeConstruct;
1092             }
1093         }
1094     }
1095 #endif
1096     
1097     addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1098 }
1099
1100 void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1101 {
1102     ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
1103     
1104     int result = pc[1].u.operand;
1105     int callee = pc[2].u.operand;
1106     int thisReg = pc[3].u.operand;
1107     int arguments = pc[4].u.operand;
1108     int firstFreeReg = pc[5].u.operand;
1109     int firstVarArgOffset = pc[6].u.operand;
1110     
1111     SpeculatedType prediction = getPrediction();
1112     
1113     Node* callTarget = get(VirtualRegister(callee));
1114     
1115     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1116         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1117         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1118     if (callTarget->isCellConstant())
1119         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1120     
1121     if (Options::verboseDFGByteCodeParsing())
1122         dataLog("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1123     
1124     if (callLinkStatus.canOptimize()
1125         && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
1126         if (m_graph.compilation())
1127             m_graph.compilation()->noticeInlinedCall();
1128         return;
1129     }
1130     
1131     CallVarargsData* data = m_graph.m_callVarargsData.add();
1132     data->firstVarArgOffset = firstVarArgOffset;
1133     
1134     Node* thisChild = get(VirtualRegister(thisReg));
1135     
1136     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
1137     VirtualRegister resultReg(result);
1138     if (resultReg.isValid())
1139         set(resultReg, call);
1140 }
1141
1142 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1143 {
1144     Node* thisArgument;
1145     if (thisArgumentReg.isValid())
1146         thisArgument = get(thisArgumentReg);
1147     else
1148         thisArgument = 0;
1149
1150     JSCell* calleeCell;
1151     Node* callTargetForCheck;
1152     if (callee.isClosureCall()) {
1153         calleeCell = callee.executable();
1154         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1155     } else {
1156         calleeCell = callee.nonExecutableCallee();
1157         callTargetForCheck = callTarget;
1158     }
1159     
1160     ASSERT(calleeCell);
1161     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1162 }
1163
1164 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1165 {
1166     for (int i = 0; i < argumentCountIncludingThis; ++i)
1167         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1168 }
1169
1170 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1171 {
1172     if (verbose)
1173         dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1174     
1175     if (m_hasDebuggerEnabled) {
1176         if (verbose)
1177             dataLog("    Failing because the debugger is in use.\n");
1178         return UINT_MAX;
1179     }
1180
1181     FunctionExecutable* executable = callee.functionExecutable();
1182     if (!executable) {
1183         if (verbose)
1184             dataLog("    Failing because there is no function executable.\n");
1185         return UINT_MAX;
1186     }
1187     
1188     // Does the number of arguments we're passing match the arity of the target? We currently
1189     // inline only if the number of arguments passed is greater than or equal to the number
1190     // of arguments expected.
1191     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1192         if (verbose)
1193             dataLog("    Failing because of arity mismatch.\n");
1194         return UINT_MAX;
1195     }
1196     
1197     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1198     // being an inline candidate? We might not have a code block (1) if code was thrown away,
1199     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
1200     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1201     // to inline it if we had a static proof of what was being called; this might happen for example
1202     // if you call a global function, where watchpointing gives us static information. Overall,
1203     // it's a rare case because we expect that any hot callees would have already been compiled.
1204     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1205     if (!codeBlock) {
1206         if (verbose)
1207             dataLog("    Failing because no code block available.\n");
1208         return UINT_MAX;
1209     }
1210     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1211         codeBlock, kind, callee.isClosureCall());
1212     if (verbose) {
1213         dataLog("    Kind: ", kind, "\n");
1214         dataLog("    Is closure call: ", callee.isClosureCall(), "\n");
1215         dataLog("    Capability level: ", capabilityLevel, "\n");
1216         dataLog("    Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
1217         dataLog("    Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
1218         dataLog("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1219         dataLog("    Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n");
1220         dataLog("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1221     }
1222     if (!canInline(capabilityLevel)) {
1223         if (verbose)
1224             dataLog("    Failing because the function is not inlineable.\n");
1225         return UINT_MAX;
1226     }
1227     
1228     // Check if the caller is already too large. We do this check here because that's just
1229     // where we happen to also have the callee's code block, and we want that for the
1230     // purpose of unsetting SABI (ShouldAlwaysBeInlined).
1231     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1232         codeBlock->m_shouldAlwaysBeInlined = false;
1233         if (verbose)
1234             dataLog("    Failing because the caller is too large.\n");
1235         return UINT_MAX;
1236     }
1237     
1238     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1239     // this function.
1240     // https://bugs.webkit.org/show_bug.cgi?id=127627
1241     
1242     // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
1243     // functions have very low fidelity profiling, and presumably they weren't very hot if they
1244     // haven't gotten to Baseline yet. Consider not inlining these functions.
1245     // https://bugs.webkit.org/show_bug.cgi?id=145503
1246     
1247     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1248     // too many levels? If either of these are detected, then don't inline. We adjust our
1249     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1250     
1251     unsigned depth = 0;
1252     unsigned recursion = 0;
1253     
1254     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1255         ++depth;
1256         if (depth >= Options::maximumInliningDepth()) {
1257             if (verbose)
1258                 dataLog("    Failing because depth exceeded.\n");
1259             return UINT_MAX;
1260         }
1261         
1262         if (entry->executable() == executable) {
1263             ++recursion;
1264             if (recursion >= Options::maximumInliningRecursion()) {
1265                 if (verbose)
1266                     dataLog("    Failing because recursion detected.\n");
1267                 return UINT_MAX;
1268             }
1269         }
1270     }
1271     
1272     if (verbose)
1273         dataLog("    Inlining should be possible.\n");
1274     
1275     // It might be possible to inline.
1276     return codeBlock->instructionCount();
1277 }
1278
1279 template<typename ChecksFunctor>
1280 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
1281 {
1282     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1283     
1284     ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1285     
1286     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1287     insertChecks(codeBlock);
1288
1289     // FIXME: Don't flush constants!
1290     
1291     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1292     
1293     ensureLocals(
1294         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1295         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1296     
1297     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1298
1299     VirtualRegister resultReg(resultOperand);
1300     if (resultReg.isValid())
1301         resultReg = m_inlineStackTop->remapOperand(resultReg);
1302     
1303     InlineStackEntry inlineStackEntry(
1304         this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1305         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1306     
1307     // This is where the actual inlining really happens.
1308     unsigned oldIndex = m_currentIndex;
1309     m_currentIndex = 0;
1310
1311     InlineVariableData inlineVariableData;
1312     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1313     inlineVariableData.argumentPositionStart = argumentPositionStart;
1314     inlineVariableData.calleeVariable = 0;
1315     
1316     RELEASE_ASSERT(
1317         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1318         == callee.isClosureCall());
1319     if (callee.isClosureCall()) {
1320         VariableAccessData* calleeVariable =
1321             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1322         
1323         calleeVariable->mergeShouldNeverUnbox(true);
1324         
1325         inlineVariableData.calleeVariable = calleeVariable;
1326     }
1327     
1328     m_graph.m_inlineVariableData.append(inlineVariableData);
1329     
1330     parseCodeBlock();
1331     clearCaches(); // Reset our state now that we're back to the outer code.
1332     
1333     m_currentIndex = oldIndex;
1334     
1335     // If the inlined code created some new basic blocks, then we have linking to do.
1336     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1337         
1338         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1339         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1340             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1341         else
1342             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1343         
1344         if (callerLinkability == CallerDoesNormalLinking)
1345             cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1346         
1347         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1348     } else
1349         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1350     
1351     BasicBlock* lastBlock = m_graph.lastBlock();
1352     // If there was a return, but no early returns, then we're done. We allow parsing of
1353     // the caller to continue in whatever basic block we're in right now.
1354     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1355         if (Options::verboseDFGByteCodeParsing())
1356             dataLog("    Allowing parsing to continue in last inlined block.\n");
1357         
1358         ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
1359         
1360         // If we created new blocks then the last block needs linking, but in the
1361         // caller. It doesn't need to be linked to, but it needs outgoing links.
1362         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1363             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1364             // for release builds because this block will never serve as a potential target
1365             // in the linker's binary search.
1366             if (Options::verboseDFGByteCodeParsing())
1367                 dataLog("        Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
1368             lastBlock->bytecodeBegin = m_currentIndex;
1369             if (callerLinkability == CallerDoesNormalLinking) {
1370                 if (verbose)
1371                     dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1372                 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1373             }
1374         }
1375         
1376         m_currentBlock = m_graph.lastBlock();
1377         return;
1378     }
1379     
1380     if (Options::verboseDFGByteCodeParsing())
1381         dataLog("    Creating new block after inlining.\n");
1382
1383     // If we get to this point then all blocks must end in some sort of terminal.
1384     ASSERT(lastBlock->terminal());
1385
1386     // Need to create a new basic block for the continuation at the caller.
1387     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1388
1389     // Link the early returns to the basic block we're about to create.
1390     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1391         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1392             continue;
1393         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1394         ASSERT(!blockToLink->isLinked);
1395         Node* node = blockToLink->terminal();
1396         ASSERT(node->op() == Jump);
1397         ASSERT(!node->targetBlock());
1398         node->targetBlock() = block.get();
1399         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1400         if (verbose)
1401             dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1402         blockToLink->didLink();
1403     }
1404     
1405     m_currentBlock = block.get();
1406     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1407     if (verbose)
1408         dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1409     if (callerLinkability == CallerDoesNormalLinking) {
1410         m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1411         m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1412     }
1413     m_graph.appendBlock(block);
1414     prepareToParseBlock();
1415 }
1416
1417 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1418 {
1419     // It's possible that the callsite block head is not owned by the caller.
1420     if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1421         // It's definitely owned by the caller, because the caller created new blocks.
1422         // Assert that this all adds up.
1423         ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1424         ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1425         inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1426     } else {
1427         // It's definitely not owned by the caller. Tell the caller that he does not
1428         // need to link his callsite block head, because we did it for him.
1429         ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1430         ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1431         inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1432     }
1433 }
1434
1435 template<typename ChecksFunctor>
1436 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
1437 {
1438     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1439     
1440     if (!inliningBalance)
1441         return false;
1442     
1443     bool didInsertChecks = false;
1444     auto insertChecksWithAccounting = [&] () {
1445         insertChecks(nullptr);
1446         didInsertChecks = true;
1447     };
1448     
1449     if (verbose)
1450         dataLog("    Considering callee ", callee, "\n");
1451     
1452     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1453     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1454     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1455     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1456     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1457     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1458     // calling LoadVarargs twice.
1459     if (!InlineCallFrame::isVarargs(kind)) {
1460         if (InternalFunction* function = callee.internalFunction()) {
1461             if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
1462                 RELEASE_ASSERT(didInsertChecks);
1463                 addToGraph(Phantom, callTargetNode);
1464                 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1465                 inliningBalance--;
1466                 return true;
1467             }
1468             RELEASE_ASSERT(!didInsertChecks);
1469             return false;
1470         }
1471     
1472         Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1473         if (intrinsic != NoIntrinsic) {
1474             if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1475                 RELEASE_ASSERT(didInsertChecks);
1476                 addToGraph(Phantom, callTargetNode);
1477                 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1478                 inliningBalance--;
1479                 return true;
1480             }
1481             RELEASE_ASSERT(!didInsertChecks);
1482             return false;
1483         }
1484     }
1485     
1486     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1487     if (myInliningCost > inliningBalance)
1488         return false;
1489
1490     inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
1491     inliningBalance -= myInliningCost;
1492     return true;
1493 }
1494
1495 bool ByteCodeParser::handleInlining(
1496     Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
1497     int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
1498     VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
1499     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1500 {
1501     if (verbose) {
1502         dataLog("Handling inlining...\n");
1503         dataLog("Stack: ", currentCodeOrigin(), "\n");
1504     }
1505     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1506     
1507     if (!callLinkStatus.size()) {
1508         if (verbose)
1509             dataLog("Bailing inlining.\n");
1510         return false;
1511     }
1512     
1513     if (InlineCallFrame::isVarargs(kind)
1514         && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1515         if (verbose)
1516             dataLog("Bailing inlining because of varargs.\n");
1517         return false;
1518     }
1519         
1520     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1521     if (specializationKind == CodeForConstruct)
1522         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1523     if (callLinkStatus.isClosureCall())
1524         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
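         // For illustration (hypothetical numbers): if the candidate limit were 180 instructions, then a
         // 100-instruction callee would consume 100 units of this balance, leaving 80 for any further
         // callees considered at this call site. attemptToInlineCall() below subtracts each callee's
         // inliningCost() from inliningBalance and refuses to inline once a callee's cost exceeds what
         // remains.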
1525     
1526     // First check if we can avoid creating control flow. Our inliner does some CFG
1527     // simplification on the fly and this helps reduce compile times, but we can only leverage
1528     // this in cases where we don't need control flow diamonds to check the callee.
1529     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1530         int registerOffset;
1531         
1532         // Only used for varargs calls.
1533         unsigned mandatoryMinimum = 0;
1534         unsigned maxNumArguments = 0;
1535
1536         if (InlineCallFrame::isVarargs(kind)) {
1537             if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
1538                 mandatoryMinimum = functionExecutable->parameterCount();
1539             else
1540                 mandatoryMinimum = 0;
1541             
1542             // includes "this"
1543             maxNumArguments = std::max(
1544                 callLinkStatus.maxNumArguments(),
1545                 mandatoryMinimum + 1);
1546             
1547             // We sort of pretend that this *is* the number of arguments that were passed.
1548             argumentCountIncludingThis = maxNumArguments;
1549             
1550             registerOffset = registerOffsetOrFirstFreeReg + 1;
1551             registerOffset -= maxNumArguments; // includes "this"
1552             registerOffset -= JSStack::CallFrameHeaderSize;
1553             registerOffset = -WTF::roundUpToMultipleOf(
1554                 stackAlignmentRegisters(),
1555                 -registerOffset);
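                 // A sketch of the rounding step above, assuming stackAlignmentRegisters() == 2 (the
                 // real value is platform-dependent): a raw offset of -27 becomes
                 // -roundUpToMultipleOf(2, 27) == -28, i.e. the offset is pushed one register further
                 // down so that the inlined call frame stays properly aligned.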
1556         } else
1557             registerOffset = registerOffsetOrFirstFreeReg;
1558         
1559         bool result = attemptToInlineCall(
1560             callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1561             argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1562             inliningBalance, [&] (CodeBlock* codeBlock) {
1563                 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1564
1565                 // If we have a varargs call, we want to extract the arguments right now.
1566                 if (InlineCallFrame::isVarargs(kind)) {
1567                     int remappedRegisterOffset =
1568                         m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1569                     
1570                     ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1571                     
1572                     int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
1573                     int remappedArgumentStart =
1574                         m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1575
1576                     LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1577                     data->start = VirtualRegister(remappedArgumentStart + 1);
1578                     data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
1579                     data->offset = argumentsOffset;
1580                     data->limit = maxNumArguments;
1581                     data->mandatoryMinimum = mandatoryMinimum;
1582             
1583                     addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1584
1585                     // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1586                     // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1587                     // callTargetNode because the other 2 are still in use and alive at this point.
1588                     addToGraph(Phantom, callTargetNode);
1589
1590                     // In DFG IR before SSA, we cannot insert control flow between the
1591                     // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
1592                     // SSA. Fortunately, we also have other reasons for not inserting control flow
1593                     // before SSA.
1594             
1595                     VariableAccessData* countVariable = newVariableAccessData(
1596                         VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
1597                     // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1598                     // matter very much, since our use of a SetArgument and Flushes for this local slot is
1599                     // mostly just a formality.
1600                     countVariable->predict(SpecInt32);
1601                     countVariable->mergeIsProfitableToUnbox(true);
1602                     Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1603                     m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1604
1605                     set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1606                     for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1607                         VariableAccessData* variable = newVariableAccessData(
1608                             VirtualRegister(remappedArgumentStart + argument));
1609                         variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1610                         
1611                         // For a while it had been my intention to do things like this inside the
1612                         // prediction injection phase. But in this case it's really best to do it here,
1613                         // because it's here that we have access to the variable access datas for the
1614                         // inlining we're about to do.
1615                         //
1616                         // Something else that's interesting here is that we'd really love to get
1617                         // predictions from the arguments loaded at the callsite, rather than the
1618                         // arguments received inside the callee. But that probably won't matter for most
1619                         // calls.
1620                         if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1621                             ConcurrentJITLocker locker(codeBlock->m_lock);
1622                             if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
1623                                 variable->predict(profile->computeUpdatedPrediction(locker));
1624                         }
1625                         
1626                         Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1627                         m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1628                     }
1629                 }
1630             });
1631         if (verbose) {
1632             dataLog("Done inlining (simple).\n");
1633             dataLog("Stack: ", currentCodeOrigin(), "\n");
1634             dataLog("Result: ", result, "\n");
1635         }
1636         return result;
1637     }
1638     
1639     // We need to create some kind of switch over callee. For now we only do this if we believe that
1640     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1641     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1642     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1643     // we could improve that aspect by doing polymorphic inlining while still keeping the call
1644     // profiling in place.
1645     if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()
1646         || InlineCallFrame::isVarargs(kind)) {
1647         if (verbose) {
1648             dataLog("Bailing inlining (hard).\n");
1649             dataLog("Stack: ", currentCodeOrigin(), "\n");
1650         }
1651         return false;
1652     }
1653     
1654     unsigned oldOffset = m_currentIndex;
1655     
1656     bool allAreClosureCalls = true;
1657     bool allAreDirectCalls = true;
1658     for (unsigned i = callLinkStatus.size(); i--;) {
1659         if (callLinkStatus[i].isClosureCall())
1660             allAreDirectCalls = false;
1661         else
1662             allAreClosureCalls = false;
1663     }
1664     
1665     Node* thingToSwitchOn;
1666     if (allAreDirectCalls)
1667         thingToSwitchOn = callTargetNode;
1668     else if (allAreClosureCalls)
1669         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1670     else {
1671         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1672         // where it would be beneficial. It might be best to handle these cases as if all calls were
1673         // closure calls.
1674         // https://bugs.webkit.org/show_bug.cgi?id=136020
1675         if (verbose) {
1676             dataLog("Bailing inlining (mix).\n");
1677             dataLog("Stack: ", currentCodeOrigin(), "\n");
1678         }
1679         return false;
1680     }
1681     
1682     if (verbose) {
1683         dataLog("Doing hard inlining...\n");
1684         dataLog("Stack: ", currentCodeOrigin(), "\n");
1685     }
1686     
1687     int registerOffset = registerOffsetOrFirstFreeReg;
1688     
1689     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1690     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1691     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1692     // yet.
1693     if (verbose)
1694         dataLog("Register offset: ", registerOffset, "\n");
1695     VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1696     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1697     if (verbose)
1698         dataLog("Callee is going to be ", calleeReg, "\n");
1699     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1700     
1701     SwitchData& data = *m_graph.m_switchData.add();
1702     data.kind = SwitchCell;
1703     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
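         // Roughly, with two monomorphic callees f and g, the Switch dispatches on the callee cell
         // (or, for closure calls, its executable); the loop below appends one SwitchCase per callee
         // pointing at its freshly inlined body, the fall-through goes to the slow path block created
         // after the loop, and every landing block then jumps to a shared continuation block.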
1704     
1705     BasicBlock* originBlock = m_currentBlock;
1706     if (verbose)
1707         dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1708     originBlock->didLink();
1709     cancelLinkingForBlock(m_inlineStackTop, originBlock);
1710     
1711     // Each inlined callee will have a landing block that it returns to. They should all have jumps
1712     // to the continuation block, which we create last.
1713     Vector<BasicBlock*> landingBlocks;
1714     
1715     // We may force this true if we give up on inlining any of the edges.
1716     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1717     
1718     if (verbose)
1719         dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1720     
1721     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1722         m_currentIndex = oldOffset;
1723         RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1724         m_currentBlock = block.get();
1725         m_graph.appendBlock(block);
1726         prepareToParseBlock();
1727         
1728         Node* myCallTargetNode = getDirect(calleeReg);
1729         
1730         bool inliningResult = attemptToInlineCall(
1731             myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
1732             argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1733             inliningBalance, [&] (CodeBlock*) { });
1734         
1735         if (!inliningResult) {
1736             // That failed so we let the block die. Nothing interesting should have been added to
1737             // the block. We also give up on inlining any of the (less frequent) callees.
1738             ASSERT(m_currentBlock == block.get());
1739             ASSERT(m_graph.m_blocks.last() == block);
1740             m_graph.killBlockAndItsContents(block.get());
1741             m_graph.m_blocks.removeLast();
1742             
1743             // The fact that inlining failed means we need a slow path.
1744             couldTakeSlowPath = true;
1745             break;
1746         }
1747         
1748         JSCell* thingToCaseOn;
1749         if (allAreDirectCalls)
1750             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
1751         else {
1752             ASSERT(allAreClosureCalls);
1753             thingToCaseOn = callLinkStatus[i].executable();
1754         }
1755         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1756         m_currentIndex = nextOffset;
1757         processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1758         addToGraph(Jump);
1759         if (verbose)
1760             dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1761         m_currentBlock->didLink();
1762         landingBlocks.append(m_currentBlock);
1763
1764         if (verbose)
1765             dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
1766     }
1767     
1768     RefPtr<BasicBlock> slowPathBlock = adoptRef(
1769         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1770     m_currentIndex = oldOffset;
1771     data.fallThrough = BranchTarget(slowPathBlock.get());
1772     m_graph.appendBlock(slowPathBlock);
1773     if (verbose)
1774         dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1775     slowPathBlock->didLink();
1776     prepareToParseBlock();
1777     m_currentBlock = slowPathBlock.get();
1778     Node* myCallTargetNode = getDirect(calleeReg);
1779     if (couldTakeSlowPath) {
1780         addCall(
1781             resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1782             registerOffset, prediction);
1783     } else {
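             // couldTakeSlowPath is false only when profiling saw no other callees and every profiled
             // callee was inlined above, so this fall-through should be unreachable; CheckBadCell in
             // effect forces an exit if it is ever taken, and BottomValue stands in for the result
             // that can never be produced here.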
1784         addToGraph(CheckBadCell);
1785         addToGraph(Phantom, myCallTargetNode);
1786         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1787         
1788         set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1789     }
1790
1791     m_currentIndex = nextOffset;
1792     processSetLocalQueue();
1793     addToGraph(Jump);
1794     landingBlocks.append(m_currentBlock);
1795     
1796     RefPtr<BasicBlock> continuationBlock = adoptRef(
1797         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1798     m_graph.appendBlock(continuationBlock);
1799     if (verbose)
1800         dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1801     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1802     prepareToParseBlock();
1803     m_currentBlock = continuationBlock.get();
1804     
1805     for (unsigned i = landingBlocks.size(); i--;)
1806         landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();
1807     
1808     m_currentIndex = oldOffset;
1809     
1810     if (verbose) {
1811         dataLog("Done inlining (hard).\n");
1812         dataLog("Stack: ", currentCodeOrigin(), "\n");
1813     }
1814     return true;
1815 }
1816
1817 template<typename ChecksFunctor>
1818 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
1819 {
1820     if (argumentCountIncludingThis == 1) { // Math.min() is +Infinity and Math.max() is -Infinity, per spec.
1821         insertChecks();
1822         set(VirtualRegister(resultOperand), jsConstant(jsDoubleNumber(op == ArithMin ? std::numeric_limits<double>::infinity() : -std::numeric_limits<double>::infinity())));
1823         return true;
1824     }
1825      
1826     if (argumentCountIncludingThis == 2) { // Math.min(x)
1827         insertChecks();
1828         Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
1829         addToGraph(Phantom, Edge(result, NumberUse));
1830         set(VirtualRegister(resultOperand), result);
1831         return true;
1832     }
1833     
1834     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1835         insertChecks();
1836         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1837         return true;
1838     }
1839     
1840     // Don't handle >=3 arguments for now.
1841     return false;
1842 }
1843
1844 template<typename ChecksFunctor>
1845 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
1846 {
1847     switch (intrinsic) {
1848     case AbsIntrinsic: {
1849         if (argumentCountIncludingThis == 1) { // Math.abs()
1850             insertChecks();
1851             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1852             return true;
1853         }
1854
1855         if (!MacroAssembler::supportsFloatingPointAbs())
1856             return false;
1857
1858         insertChecks();
1859         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1860         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1861             node->mergeFlags(NodeMayOverflowInDFG);
1862         set(VirtualRegister(resultOperand), node);
1863         return true;
1864     }
1865
1866     case MinIntrinsic:
1867         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
1868         
1869     case MaxIntrinsic:
1870         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
1871
1872     case SqrtIntrinsic:
1873     case CosIntrinsic:
1874     case SinIntrinsic:
1875     case LogIntrinsic: {
1876         if (argumentCountIncludingThis == 1) {
1877             insertChecks();
1878             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1879             return true;
1880         }
1881         
1882         switch (intrinsic) {
1883         case SqrtIntrinsic:
1884             insertChecks();
1885             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1886             return true;
1887             
1888         case CosIntrinsic:
1889             insertChecks();
1890             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1891             return true;
1892             
1893         case SinIntrinsic:
1894             insertChecks();
1895             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1896             return true;
1897
1898         case LogIntrinsic:
1899             insertChecks();
1900             set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
1901             return true;
1902             
1903         default:
1904             RELEASE_ASSERT_NOT_REACHED();
1905             return false;
1906         }
1907     }
1908
1909     case PowIntrinsic: {
1910         if (argumentCountIncludingThis < 3) {
1911             // Math.pow() and Math.pow(x) return NaN.
1912             insertChecks();
1913             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1914             return true;
1915         }
1916         insertChecks();
1917         VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
1918         VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
1919         set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
1920         return true;
1921     }
1922         
1923     case ArrayPushIntrinsic: {
1924         if (argumentCountIncludingThis != 2)
1925             return false;
1926         
1927         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1928         if (!arrayMode.isJSArray())
1929             return false;
1930         switch (arrayMode.type()) {
1931         case Array::Undecided:
1932         case Array::Int32:
1933         case Array::Double:
1934         case Array::Contiguous:
1935         case Array::ArrayStorage: {
1936             insertChecks();
1937             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1938             set(VirtualRegister(resultOperand), arrayPush);
1939             
1940             return true;
1941         }
1942             
1943         default:
1944             return false;
1945         }
1946     }
1947         
1948     case ArrayPopIntrinsic: {
1949         if (argumentCountIncludingThis != 1)
1950             return false;
1951         
1952         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1953         if (!arrayMode.isJSArray())
1954             return false;
1955         switch (arrayMode.type()) {
1956         case Array::Int32:
1957         case Array::Double:
1958         case Array::Contiguous:
1959         case Array::ArrayStorage: {
1960             insertChecks();
1961             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1962             set(VirtualRegister(resultOperand), arrayPop);
1963             return true;
1964         }
1965             
1966         default:
1967             return false;
1968         }
1969     }
1970
1971     case CharCodeAtIntrinsic: {
1972         if (argumentCountIncludingThis != 2)
1973             return false;
1974
1975         insertChecks();
1976         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1977         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1978         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1979
1980         set(VirtualRegister(resultOperand), charCode);
1981         return true;
1982     }
1983
1984     case CharAtIntrinsic: {
1985         if (argumentCountIncludingThis != 2)
1986             return false;
1987
1988         insertChecks();
1989         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1990         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1991         Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1992
1993         set(VirtualRegister(resultOperand), charCode);
1994         return true;
1995     }
1996     case Clz32Intrinsic: {
1997         insertChecks();
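             // Note: Math.clz32() with no argument coerces undefined to 0, and clz32(0) is 32, which
             // is why the zero-argument case below folds to the constant 32.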
1998         if (argumentCountIncludingThis == 1)
1999             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2000         else {
2001             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2002             set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
2003         }
2004         return true;
2005     }
2006     case FromCharCodeIntrinsic: {
2007         if (argumentCountIncludingThis != 2)
2008             return false;
2009
2010         insertChecks();
2011         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2012         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2013
2014         set(VirtualRegister(resultOperand), charCode);
2015
2016         return true;
2017     }
2018
2019     case RegExpExecIntrinsic: {
2020         if (argumentCountIncludingThis != 2)
2021             return false;
2022         
2023         insertChecks();
2024         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2025         set(VirtualRegister(resultOperand), regExpExec);
2026         
2027         return true;
2028     }
2029         
2030     case RegExpTestIntrinsic: {
2031         if (argumentCountIncludingThis != 2)
2032             return false;
2033         
2034         insertChecks();
2035         Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2036         set(VirtualRegister(resultOperand), regExpExec);
2037         
2038         return true;
2039     }
2040     case RoundIntrinsic: {
2041         if (argumentCountIncludingThis == 1) {
2042             insertChecks();
2043             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2044             return true;
2045         }
2046         if (argumentCountIncludingThis == 2) {
2047             insertChecks();
2048             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2049             Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand);
2050             set(VirtualRegister(resultOperand), roundNode);
2051             return true;
2052         }
2053         return false;
2054     }
2055     case IMulIntrinsic: {
2056         if (argumentCountIncludingThis != 3)
2057             return false;
2058         insertChecks();
2059         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2060         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2061         Node* left = get(leftOperand);
2062         Node* right = get(rightOperand);
2063         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
2064         return true;
2065     }
2066         
2067     case FRoundIntrinsic: {
2068         if (argumentCountIncludingThis != 2)
2069             return false;
2070         insertChecks();
2071         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2072         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
2073         return true;
2074     }
2075         
2076     case DFGTrueIntrinsic: {
2077         insertChecks();
2078         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2079         return true;
2080     }
2081         
2082     case OSRExitIntrinsic: {
2083         insertChecks();
2084         addToGraph(ForceOSRExit);
2085         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2086         return true;
2087     }
2088         
2089     case IsFinalTierIntrinsic: {
2090         insertChecks();
2091         set(VirtualRegister(resultOperand),
2092             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
2093         return true;
2094     }
2095         
2096     case SetInt32HeapPredictionIntrinsic: {
2097         insertChecks();
2098         for (int i = 1; i < argumentCountIncludingThis; ++i) {
2099             Node* node = get(virtualRegisterForArgument(i, registerOffset));
2100             if (node->hasHeapPrediction())
2101                 node->setHeapPrediction(SpecInt32);
2102         }
2103         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2104         return true;
2105     }
2106         
2107     case CheckInt32Intrinsic: {
2108         insertChecks();
2109         for (int i = 1; i < argumentCountIncludingThis; ++i) {
2110             Node* node = get(virtualRegisterForArgument(i, registerOffset));
2111             addToGraph(Phantom, Edge(node, Int32Use));
2112         }
2113         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2114         return true;
2115     }
2116         
2117     case FiatInt52Intrinsic: {
2118         if (argumentCountIncludingThis != 2)
2119             return false;
2120         insertChecks();
2121         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2122         if (enableInt52())
2123             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
2124         else
2125             set(VirtualRegister(resultOperand), get(operand));
2126         return true;
2127     }
2128         
2129     default:
2130         return false;
2131     }
2132 }
2133
2134 template<typename ChecksFunctor>
2135 bool ByteCodeParser::handleTypedArrayConstructor(
2136     int resultOperand, InternalFunction* function, int registerOffset,
2137     int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
2138 {
2139     if (!isTypedView(type))
2140         return false;
2141     
2142     if (function->classInfo() != constructorClassInfoForType(type))
2143         return false;
2144     
2145     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2146         return false;
2147     
2148     // We only have an intrinsic for the case where you say:
2149     //
2150     // new FooArray(blah);
2151     //
2152     // Of course, 'blah' could be any of the following:
2153     //
2154     // - Integer, indicating that you want to allocate an array of that length.
2155     //   This is the thing we're hoping for, and what we can actually do meaningful
2156     //   optimizations for.
2157     //
2158     // - Array buffer, indicating that you want to create a view onto that _entire_
2159     //   buffer.
2160     //
2161     // - Non-buffer object, indicating that you want to create a copy of that
2162     //   object by pretending that it quacks like an array.
2163     //
2164     // - Anything else, indicating that you want to have an exception thrown at
2165     //   you.
2166     //
2167     // The intrinsic, NewTypedArray, will behave as if it could do any of these
2168     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
2169     // predicted Int32, then we lock it in as a normal typed array allocation.
2170     // Otherwise, NewTypedArray turns into a totally opaque function call that
2171     // may clobber the world - by virtue of it accessing properties on what could
2172     // be an object.
2173     //
2174     // Note that although the generic form of NewTypedArray sounds sort of awful,
2175     // it is actually quite likely to be more efficient than a fully generic
2176     // Construct. So, we might want to think about making NewTypedArray variadic,
2177     // or else making Construct not super slow.
2178     
2179     if (argumentCountIncludingThis != 2)
2180         return false;
2181
2182     insertChecks();
2183     set(VirtualRegister(resultOperand),
2184         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
2185     return true;
2186 }
2187
2188 template<typename ChecksFunctor>
2189 bool ByteCodeParser::handleConstantInternalFunction(
2190     int resultOperand, InternalFunction* function, int registerOffset,
2191     int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
2192 {
2193     if (verbose)
2194         dataLog("    Handling constant internal function ", JSValue(function), "\n");
2195     
2196     // If we ever find that we have a lot of internal functions that we specialize for,
2197     // then we should probably have some sort of hashtable dispatch, or maybe even
2198     // dispatch straight through the MethodTable of the InternalFunction. But for now,
2199     // it seems that this case is hit infrequently enough, and the number of functions
2200     // we know about is small enough, that having just a linear cascade of if statements
2201     // is good enough.
2202     
2203     if (function->classInfo() == ArrayConstructor::info()) {
2204         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2205             return false;
2206         
2207         insertChecks();
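             // Two shapes are handled below: the single-argument form, e.g. 'new Array(len)', becomes
             // NewArrayWithSize, while any other arity builds a NewArray node with each argument added
             // as a vararg child.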
2208         if (argumentCountIncludingThis == 2) {
2209             set(VirtualRegister(resultOperand),
2210                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
2211             return true;
2212         }
2213         
2214         // FIXME: Array constructor should use "this" as newTarget.
2215         for (int i = 1; i < argumentCountIncludingThis; ++i)
2216             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2217         set(VirtualRegister(resultOperand),
2218             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
2219         return true;
2220     }
2221     
2222     if (function->classInfo() == StringConstructor::info()) {
2223         insertChecks();
2224         
2225         Node* result;
2226         
2227         if (argumentCountIncludingThis <= 1)
2228             result = jsConstant(m_vm->smallStrings.emptyString());
2229         else
2230             result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
2231         
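             // Calling String(x) yields the primitive string computed above; constructing it via
             // 'new String(x)' additionally wraps that primitive in a StringObject, which is what the
             // CodeForConstruct branch below does.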
2232         if (kind == CodeForConstruct)
2233             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
2234         
2235         set(VirtualRegister(resultOperand), result);
2236         return true;
2237     }
2238     
2239     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
2240         bool result = handleTypedArrayConstructor(
2241             resultOperand, function, registerOffset, argumentCountIncludingThis,
2242             indexToTypedArrayType(typeIndex), insertChecks);
2243         if (result)
2244             return true;
2245     }
2246     
2247     return false;
2248 }
2249
2250 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
2251 {
2252     if (base->hasConstant()) {
2253         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
2254             addToGraph(Phantom, base);
2255             return weakJSConstant(constant);
2256         }
2257     }
2258     
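         // Inline-offset properties live directly inside the object cell, so the base itself stands in
         // for the property storage; out-of-line properties live in the butterfly, which GetButterfly
         // loads.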
2259     Node* propertyStorage;
2260     if (isInlineOffset(offset))
2261         propertyStorage = base;
2262     else
2263         propertyStorage = addToGraph(GetButterfly, base);
2264     
2265     StorageAccessData* data = m_graph.m_storageAccessData.add();
2266     data->offset = offset;
2267     data->identifierNumber = identifierNumber;
2268     
2269     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
2270
2271     return getByOffset;
2272 }
2273
2274 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
2275 {
2276     Node* propertyStorage;
2277     if (isInlineOffset(offset))
2278         propertyStorage = base;
2279     else
2280         propertyStorage = addToGraph(GetButterfly, base);
2281     
2282     StorageAccessData* data = m_graph.m_storageAccessData.add();
2283     data->offset = offset;
2284     data->identifierNumber = identifier;
2285     
2286     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
2287     
2288     return result;
2289 }
2290
2291 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2292 {
2293     for (unsigned i = 0; i < vector.size(); ++i)
2294         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2295 }
2296
2297 void ByteCodeParser::handleGetById(
2298     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2299     const GetByIdStatus& getByIdStatus)
2300 {
2301     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2302     
2303     if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2304         set(VirtualRegister(destinationOperand),
2305             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2306         return;
2307     }
2308     
2309     if (getByIdStatus.numVariants() > 1) {
2310         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2311             || !Options::enablePolymorphicAccessInlining()) {
2312             set(VirtualRegister(destinationOperand),
2313                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2314             return;
2315         }
2316         
2317         if (m_graph.compilation())
2318             m_graph.compilation()->noticeInlinedGetById();
2319     
2320         // 1) Emit prototype structure checks for all chains. This may not be optimal if there is
2321         //    some rarely executed case in the chain that requires a lot of checks and those
2322         //    checks are not watchpointable.
2323         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2324             emitChecks(getByIdStatus[variantIndex].constantChecks());
2325         
2326         // 2) Emit a MultiGetByOffset
2327         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2328         data->variants = getByIdStatus.variants();
2329         data->identifierNumber = identifierNumber;
2330         set(VirtualRegister(destinationOperand),
2331             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2332         return;
2333     }
2334     
2335     ASSERT(getByIdStatus.numVariants() == 1);
2336     GetByIdVariant variant = getByIdStatus[0];
2337                 
2338     if (m_graph.compilation())
2339         m_graph.compilation()->noticeInlinedGetById();
2340     
2341     Node* originalBase = base;
2342                 
2343     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2344     
2345     emitChecks(variant.constantChecks());
2346
2347     if (variant.alternateBase())
2348         base = weakJSConstant(variant.alternateBase());
2349     
2350     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2351     // ensure that the base of the original get_by_id is kept alive until we're done with
2352     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2353     // on something other than the base following the CheckStructure on base.
2354     if (originalBase != base)
2355         addToGraph(Phantom, originalBase);
2356     
2357     Node* loadedValue = handleGetByOffset(
2358         variant.callLinkStatus() ? SpecCellOther : prediction,
2359         base, variant.baseStructure(), identifierNumber, variant.offset(),
2360         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2361     
2362     if (!variant.callLinkStatus()) {
2363         set(VirtualRegister(destinationOperand), loadedValue);
2364         return;
2365     }
2366     
2367     Node* getter = addToGraph(GetGetter, loadedValue);
2368     
2369     // Make a call. We don't try to get fancy with using the smallest operand number because
2370     // the stack layout phase should compress the stack anyway.
2371     
2372     unsigned numberOfParameters = 0;
2373     numberOfParameters++; // The 'this' argument.
2374     numberOfParameters++; // True return PC.
2375     
2376     // Start with a register offset that corresponds to the last in-use register.
2377     int registerOffset = virtualRegisterForLocal(
2378         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2379     registerOffset -= numberOfParameters;
2380     registerOffset -= JSStack::CallFrameHeaderSize;
2381     
2382     // Get the alignment right.
2383     registerOffset = -WTF::roundUpToMultipleOf(
2384         stackAlignmentRegisters(),
2385         -registerOffset);
2386     
2387     ensureLocals(
2388         m_inlineStackTop->remapOperand(
2389             VirtualRegister(registerOffset)).toLocal());
2390     
2391     // Issue SetLocals. This has two effects:
2392     // 1) That's how handleCall() sees the arguments.
2393     // 2) If we inline then this ensures that the arguments are flushed so that if you use
2394     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
2395     //    since we only really care about 'this' in this case. But we're not going to take that
2396     //    shortcut.
2397     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2398     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2399     
2400     handleCall(
2401         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2402         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
2403 }
2404
2405 void ByteCodeParser::emitPutById(
2406     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2407 {
2408     if (isDirect)
2409         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2410     else
2411         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2412 }
2413
2414 void ByteCodeParser::handlePutById(
2415     Node* base, unsigned identifierNumber, Node* value,
2416     const PutByIdStatus& putByIdStatus, bool isDirect)
2417 {
2418     if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2419         if (!putByIdStatus.isSet())
2420             addToGraph(ForceOSRExit);
2421         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2422         return;
2423     }
2424     
2425     if (putByIdStatus.numVariants() > 1) {
2426         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2427             || !Options::enablePolymorphicAccessInlining()) {
2428             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2429             return;
2430         }
2431         
2432         if (m_graph.compilation())
2433             m_graph.compilation()->noticeInlinedPutById();
2434         
2435         if (!isDirect) {
2436             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2437                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2438                     continue;
2439                 emitChecks(putByIdStatus[variantIndex].constantChecks());
2440             }
2441         }
2442         
2443         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2444         data->variants = putByIdStatus.variants();
2445         data->identifierNumber = identifierNumber;
2446         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2447         return;
2448     }
2449     
2450     ASSERT(putByIdStatus.numVariants() == 1);
2451     const PutByIdVariant& variant = putByIdStatus[0];
2452     
2453     switch (variant.kind()) {
2454     case PutByIdVariant::Replace: {
2455         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2456         handlePutByOffset(base, identifierNumber, variant.offset(), value);
2457         if (m_graph.compilation())
2458             m_graph.compilation()->noticeInlinedPutById();
2459         return;
2460     }
2461     
2462     case PutByIdVariant::Transition: {
2463         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2464         emitChecks(variant.constantChecks());
2465
2466         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2467     
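        // Decide where the property will live after the transition: inline in the object cell,
        // in already-allocated out-of-line storage, or in storage we must allocate or grow here.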
2468         Node* propertyStorage;
2469         Transition* transition = m_graph.m_transitions.add(
2470             variant.oldStructureForTransition(), variant.newStructure());
2471
2472         if (variant.reallocatesStorage()) {
2473
2474             // If we're growing the property storage then it must be because we're
2475             // storing into the out-of-line storage.
2476             ASSERT(!isInlineOffset(variant.offset()));
2477
2478             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2479                 propertyStorage = addToGraph(
2480                     AllocatePropertyStorage, OpInfo(transition), base);
2481             } else {
2482                 propertyStorage = addToGraph(
2483                     ReallocatePropertyStorage, OpInfo(transition),
2484                     base, addToGraph(GetButterfly, base));
2485             }
2486         } else {
2487             if (isInlineOffset(variant.offset()))
2488                 propertyStorage = base;
2489             else
2490                 propertyStorage = addToGraph(GetButterfly, base);
2491         }
2492
2493         StorageAccessData* data = m_graph.m_storageAccessData.add();
2494         data->offset = variant.offset();
2495         data->identifierNumber = identifierNumber;
2496         
2497         addToGraph(
2498             PutByOffset,
2499             OpInfo(data),
2500             propertyStorage,
2501             base,
2502             value);
2503
2504         // FIXME: PutStructure goes last until we fix either
2505         // https://bugs.webkit.org/show_bug.cgi?id=142921 or
2506         // https://bugs.webkit.org/show_bug.cgi?id=142924.
2507         addToGraph(PutStructure, OpInfo(transition), base);
2508
2509         if (m_graph.compilation())
2510             m_graph.compilation()->noticeInlinedPutById();
2511         return;
2512     }
2513         
2514     case PutByIdVariant::Setter: {
2515         Node* originalBase = base;
2516         
2517         addToGraph(
2518             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2519         
2520         emitChecks(variant.constantChecks());
2521         
2522         if (variant.alternateBase())
2523             base = weakJSConstant(variant.alternateBase());
2524         
2525         Node* loadedValue = handleGetByOffset(
2526             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2527             GetGetterSetterByOffset);
2528         
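        // The offset load above yields the GetterSetter cell; GetSetter extracts the setter
        // function that the call below targets.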
2529         Node* setter = addToGraph(GetSetter, loadedValue);
2530         
2531         // Make a call. We don't try to get fancy with using the smallest operand number because
2532         // the stack layout phase should compress the stack anyway.
2533     
2534         unsigned numberOfParameters = 0;
2535         numberOfParameters++; // The 'this' argument.
2536         numberOfParameters++; // The new value.
2537         numberOfParameters++; // True return PC.
2538     
2539         // Start with a register offset that corresponds to the last in-use register.
2540         int registerOffset = virtualRegisterForLocal(
2541             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2542         registerOffset -= numberOfParameters;
2543         registerOffset -= JSStack::CallFrameHeaderSize;
2544     
2545         // Get the alignment right.
2546         registerOffset = -WTF::roundUpToMultipleOf(
2547             stackAlignmentRegisters(),
2548             -registerOffset);
2549     
2550         ensureLocals(
2551             m_inlineStackTop->remapOperand(
2552                 VirtualRegister(registerOffset)).toLocal());
2553     
2554         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2555         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2556         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2557     
2558         handleCall(
2559             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2560             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2561             *variant.callLinkStatus(), SpecOther);
2562         return;
2563     }
2564     
2565     default: {
2566         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2567         return;
2568     } }
2569 }
2570
2571 void ByteCodeParser::prepareToParseBlock()
2572 {
2573     clearCaches();
2574     ASSERT(m_setLocalQueue.isEmpty());
2575 }
2576
2577 void ByteCodeParser::clearCaches()
2578 {
2579     m_constants.resize(0);
2580 }
2581
2582 bool ByteCodeParser::parseBlock(unsigned limit)
2583 {
2584     bool shouldContinueParsing = true;
2585
2586     Interpreter* interpreter = m_vm->interpreter;
2587     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2588     unsigned blockBegin = m_currentIndex;
2589     
2590     // If we are the first basic block, introduce markers for arguments. This allows
2591     // us to track if a use of an argument may use the actual argument passed, as
2592     // opposed to using a value we set explicitly.
2593     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2594         m_graph.m_arguments.resize(m_numArguments);
2595         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2596             VariableAccessData* variable = newVariableAccessData(
2597                 virtualRegisterForArgument(argument));
2598             variable->mergeStructureCheckHoistingFailed(
2599                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2600             variable->mergeCheckArrayHoistingFailed(
2601                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2602             
2603             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2604             m_graph.m_arguments[argument] = setArgument;
2605             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2606         }
2607     }
2608
2609     while (true) {
2610         processSetLocalQueue();
2611         
2612         // Don't extend over jump destinations.
2613         if (m_currentIndex == limit) {
2614             // Ordinarily we want to plant a jump. But refuse to do this if the block is
2615             // empty. This is a special case for inlining, which might otherwise create
2616             // empty blocks. When parseBlock() returns with an empty
2617             // block, it will get repurposed instead of creating a new one. Note that this
2618             // logic relies on every bytecode resulting in one or more nodes, which would
2619             // be true anyway except for op_loop_hint, which emits a Phantom to force this
2620             // to be true.
2621             if (!m_currentBlock->isEmpty())
2622                 addToGraph(Jump, OpInfo(m_currentIndex));
2623             return shouldContinueParsing;
2624         }
2625         
2626         // Switch on the current bytecode opcode.
2627         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2628         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2629         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2630         
2631         if (Options::verboseDFGByteCodeParsing())
2632             dataLog("    parsing ", currentCodeOrigin(), "\n");
2633         
2634         if (m_graph.compilation()) {
2635             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2636                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2637         }
2638         
2639         switch (opcodeID) {
2640
2641         // === Function entry opcodes ===
2642
2643         case op_enter: {
2644             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2645             // Initialize all locals to undefined.
2646             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2647                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2648             NEXT_OPCODE(op_enter);
2649         }
2650             
2651         case op_to_this: {
2652             Node* op1 = getThis();
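            // If |this| still needs conversion, check whether profiling proves it is already an
            // object with a known structure whose toThis is the default JSObject behavior. If so,
            // a CheckStructure suffices and we can drop the ToThis node; otherwise emit ToThis.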
2653             if (op1->op() != ToThis) {
2654                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2655                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2656                     || !cachedStructure
2657                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2658                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2659                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2660                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2661                     setThis(addToGraph(ToThis, op1));
2662                 } else {
2663                     addToGraph(
2664                         CheckStructure,
2665                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2666                         op1);
2667                 }
2668             }
2669             NEXT_OPCODE(op_to_this);
2670         }
2671
2672         case op_create_this: {
2673             int calleeOperand = currentInstruction[2].u.operand;
2674             Node* callee = get(VirtualRegister(calleeOperand));
2675
2676             JSFunction* function = callee->dynamicCastConstant<JSFunction*>();
2677             if (!function) {
2678                 JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet();
2679                 if (cachedFunction
2680                     && cachedFunction != JSCell::seenMultipleCalleeObjects()
2681                     && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
2682                     ASSERT(cachedFunction->inherits(JSFunction::info()));
2683
2684                     FrozenValue* frozen = m_graph.freeze(cachedFunction);
2685                     addToGraph(CheckCell, OpInfo(frozen), callee);
2686                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2687
2688                     function = static_cast<JSFunction*>(cachedFunction);
2689                 }
2690             }
2691
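            // If we know the callee function, try to fold the allocation: its rare data caches an
            // allocation structure, and a lazily-added watchpoint on the allocation profile
            // invalidates this compilation if that structure ever changes.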
2692             bool alreadyEmitted = false;
2693             if (function) {
2694                 if (FunctionRareData* rareData = function->rareData()) {
2695                     if (Structure* structure = rareData->allocationStructure()) {
2696                         m_graph.freeze(rareData);
2697                         m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
2698                         // The callee is still live up to this point.
2699                         addToGraph(Phantom, callee);
2700                         set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2701                         alreadyEmitted = true;
2702                     }
2703                 }
2704             }
2705             if (!alreadyEmitted) {
2706                 set(VirtualRegister(currentInstruction[1].u.operand),
2707                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2708             }
2709             NEXT_OPCODE(op_create_this);
2710         }
2711
2712         case op_new_object: {
2713             set(VirtualRegister(currentInstruction[1].u.operand),
2714                 addToGraph(NewObject,
2715                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2716             NEXT_OPCODE(op_new_object);
2717         }
2718             
2719         case op_new_array: {
2720             int startOperand = currentInstruction[2].u.operand;
2721             int numOperands = currentInstruction[3].u.operand;
2722             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2723             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2724                 addVarArgChild(get(VirtualRegister(operandIdx)));
2725             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2726             NEXT_OPCODE(op_new_array);
2727         }
2728             
2729         case op_new_array_with_size: {
2730             int lengthOperand = currentInstruction[2].u.operand;
2731             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2732             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2733             NEXT_OPCODE(op_new_array_with_size);
2734         }
2735             
2736         case op_new_array_buffer: {
2737             int startConstant = currentInstruction[2].u.operand;
2738             int numConstants = currentInstruction[3].u.operand;
2739             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2740             NewArrayBufferData data;
2741             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2742             data.numConstants = numConstants;
2743             data.indexingType = profile->selectIndexingType();
2744
2745             // If this statement has never executed, we'll have the wrong indexing type in the profile.
2746             for (int i = 0; i < numConstants; ++i) {
2747                 data.indexingType =
2748                     leastUpperBoundOfIndexingTypeAndValue(
2749                         data.indexingType,
2750                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2751             }
2752             
2753             m_graph.m_newArrayBufferData.append(data);
2754             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2755             NEXT_OPCODE(op_new_array_buffer);
2756         }
2757             
2758         case op_new_regexp: {
2759             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2760             NEXT_OPCODE(op_new_regexp);
2761         }
2762             
2763         // === Bitwise operations ===
2764
2765         case op_bitand: {
2766             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2767             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2768             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2769             NEXT_OPCODE(op_bitand);
2770         }
2771
2772         case op_bitor: {
2773             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2774             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2775             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2776             NEXT_OPCODE(op_bitor);
2777         }
2778
2779         case op_bitxor: {
2780             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2781             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2782             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2783             NEXT_OPCODE(op_bitxor);
2784         }
2785
2786         case op_rshift: {
2787             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2788             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2789             set(VirtualRegister(currentInstruction[1].u.operand),
2790                 addToGraph(BitRShift, op1, op2));
2791             NEXT_OPCODE(op_rshift);
2792         }
2793
2794         case op_lshift: {
2795             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2796             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2797             set(VirtualRegister(currentInstruction[1].u.operand),
2798                 addToGraph(BitLShift, op1, op2));
2799             NEXT_OPCODE(op_lshift);
2800         }
2801
2802         case op_urshift: {
2803             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2804             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2805             set(VirtualRegister(currentInstruction[1].u.operand),
2806                 addToGraph(BitURShift, op1, op2));
2807             NEXT_OPCODE(op_urshift);
2808         }
2809             
2810         case op_unsigned: {
2811             set(VirtualRegister(currentInstruction[1].u.operand),
2812                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2813             NEXT_OPCODE(op_unsigned);
2814         }
2815
2816         // === Increment/Decrement opcodes ===
2817
2818         case op_inc: {
2819             int srcDst = currentInstruction[1].u.operand;
2820             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2821             Node* op = get(srcDstVirtualRegister);
2822             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2823             NEXT_OPCODE(op_inc);
2824         }
2825
2826         case op_dec: {
2827             int srcDst = currentInstruction[1].u.operand;
2828             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2829             Node* op = get(srcDstVirtualRegister);
2830             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2831             NEXT_OPCODE(op_dec);
2832         }
2833
2834         // === Arithmetic operations ===
2835
2836         case op_add: {
2837             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2838             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2839             if (op1->hasNumberResult() && op2->hasNumberResult())
2840                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2841             else
2842                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2843             NEXT_OPCODE(op_add);
2844         }
2845
2846         case op_sub: {
2847             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2848             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2849             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2850             NEXT_OPCODE(op_sub);
2851         }
2852
2853         case op_negate: {
2854             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2855             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2856             NEXT_OPCODE(op_negate);
2857         }
2858
2859         case op_mul: {
2860             // Multiply requires that the inputs are not truncated, unfortunately.
2861             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2862             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2863             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2864             NEXT_OPCODE(op_mul);
2865         }
2866
2867         case op_mod: {
2868             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2869             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2870             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2871             NEXT_OPCODE(op_mod);
2872         }
2873
2874         case op_div: {
2875             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2876             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2877             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2878             NEXT_OPCODE(op_div);
2879         }
2880
2881         // === Misc operations ===
2882
2883         case op_debug:
2884             addToGraph(Breakpoint);
2885             NEXT_OPCODE(op_debug);
2886
2887         case op_profile_will_call: {
2888             addToGraph(ProfileWillCall);
2889             NEXT_OPCODE(op_profile_will_call);
2890         }
2891
2892         case op_profile_did_call: {
2893             addToGraph(ProfileDidCall);
2894             NEXT_OPCODE(op_profile_did_call);
2895         }
2896
2897         case op_mov: {
2898             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2899             set(VirtualRegister(currentInstruction[1].u.operand), op);
2900             NEXT_OPCODE(op_mov);
2901         }
2902
2903         case op_check_tdz: {
2904             Node* op = get(VirtualRegister(currentInstruction[1].u.operand));
2905             addToGraph(CheckNotEmpty, op);
2906             NEXT_OPCODE(op_check_tdz);
2907         }
2908
2909         case op_check_has_instance:
2910             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2911             NEXT_OPCODE(op_check_has_instance);
2912
2913         case op_instanceof: {
2914             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2915             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2916             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2917             NEXT_OPCODE(op_instanceof);
2918         }
2919             
2920         case op_is_undefined: {
2921             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2922             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2923             NEXT_OPCODE(op_is_undefined);
2924         }
2925
2926         case op_is_boolean: {
2927             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2928             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2929             NEXT_OPCODE(op_is_boolean);
2930         }
2931
2932         case op_is_number: {
2933             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2934             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2935             NEXT_OPCODE(op_is_number);
2936         }
2937
2938         case op_is_string: {
2939             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2940             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2941             NEXT_OPCODE(op_is_string);
2942         }
2943
2944         case op_is_object: {
2945             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2946             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2947             NEXT_OPCODE(op_is_object);
2948         }
2949
2950         case op_is_object_or_null: {
2951             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2952             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
2953             NEXT_OPCODE(op_is_object_or_null);
2954         }
2955
2956         case op_is_function: {
2957             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2958             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2959             NEXT_OPCODE(op_is_function);
2960         }
2961
2962         case op_not: {
2963             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2964             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2965             NEXT_OPCODE(op_not);
2966         }
2967             
2968         case op_to_primitive: {
2969             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2970             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2971             NEXT_OPCODE(op_to_primitive);
2972         }
2973             
2974         case op_strcat: {
2975             int startOperand = currentInstruction[2].u.operand;
2976             int numOperands = currentInstruction[3].u.operand;
2977 #if CPU(X86)
2978             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2979             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2980             const unsigned maxRopeArguments = 2;
2981 #else
2982             const unsigned maxRopeArguments = 3;
2983 #endif
2984             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2985             for (int i = 0; i < numOperands; i++)
2986                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2987
2988             for (int i = 0; i < numOperands; i++)
2989                 addToGraph(Phantom, toStringNodes[i]);
2990
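            // Build the result by chaining MakeRope nodes: gather up to maxRopeArguments strings,
            // fold them into a rope, then feed that rope back in as the first operand of the next
            // MakeRope until every operand has been consumed.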
2991             Node* operands[AdjacencyList::Size];
2992             unsigned indexInOperands = 0;
2993             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2994                 operands[i] = 0;
2995             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2996                 if (indexInOperands == maxRopeArguments) {
2997                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2998                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2999                         operands[i] = 0;
3000                     indexInOperands = 1;
3001                 }
3002                 
3003                 ASSERT(indexInOperands < AdjacencyList::Size);
3004                 ASSERT(indexInOperands < maxRopeArguments);
3005                 operands[indexInOperands++] = toStringNodes[operandIdx];
3006             }
3007             set(VirtualRegister(currentInstruction[1].u.operand),
3008                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
3009             NEXT_OPCODE(op_strcat);
3010         }
3011
3012         case op_less: {
3013             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3014             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3015             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
3016             NEXT_OPCODE(op_less);
3017         }
3018
3019         case op_lesseq: {
3020             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3021             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3022             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
3023             NEXT_OPCODE(op_lesseq);
3024         }
3025
3026         case op_greater: {
3027             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3028             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3029             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
3030             NEXT_OPCODE(op_greater);
3031         }
3032
3033         case op_greatereq: {
3034             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3035             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3036             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
3037             NEXT_OPCODE(op_greatereq);
3038         }
3039
3040         case op_eq: {
3041             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3042             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3043             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
3044             NEXT_OPCODE(op_eq);
3045         }
3046
3047         case op_eq_null: {
3048             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3049             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
3050             NEXT_OPCODE(op_eq_null);
3051         }
3052
3053         case op_stricteq: {
3054             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3055             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3056             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
3057             NEXT_OPCODE(op_stricteq);
3058         }
3059
3060         case op_neq: {
3061             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3062             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3063             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
3064             NEXT_OPCODE(op_neq);
3065         }
3066
3067         case op_neq_null: {
3068             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3069             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
3070             NEXT_OPCODE(op_neq_null);
3071         }
3072
3073         case op_nstricteq: {
3074             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3075             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3076             Node* invertedResult;
3077             invertedResult = addToGraph(CompareStrictEq, op1, op2);
3078             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
3079             NEXT_OPCODE(op_nstricteq);
3080         }
3081
3082         // === Property access operations ===
3083
3084         case op_get_by_val: {
3085             SpeculatedType prediction = getPredictionWithoutOSRExit();
3086             
3087             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3088             ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
3089             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3090             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
3091             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
3092
3093             NEXT_OPCODE(op_get_by_val);
3094         }
3095
3096         case op_put_by_val_direct:
3097         case op_put_by_val: {
3098             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
3099
3100             ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Write);
3101             
3102             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
3103             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
3104             
3105             addVarArgChild(base);
3106             addVarArgChild(property);
3107             addVarArgChild(value);
3108             addVarArgChild(0); // Leave room for property storage.
3109             addVarArgChild(0); // Leave room for length.
3110             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
3111
3112             NEXT_OPCODE(op_put_by_val);
3113         }
3114             
3115         case op_get_by_id:
3116         case op_get_by_id_out_of_line:
3117         case op_get_array_length: {
3118             SpeculatedType prediction = getPrediction();
3119             
3120             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3121             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3122             
3123             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
3124             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
3125                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
3126                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
3127                 currentCodeOrigin(), uid);
3128             
3129             handleGetById(
3130                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
3131
3132             NEXT_OPCODE(op_get_by_id);
3133         }
3134         case op_put_by_id:
3135         case op_put_by_id_out_of_line:
3136         case op_put_by_id_transition_direct:
3137         case op_put_by_id_transition_normal:
3138         case op_put_by_id_transition_direct_out_of_line:
3139         case op_put_by_id_transition_normal_out_of_line: {
3140             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
3141             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
3142             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3143             bool direct = currentInstruction[8].u.operand;
3144
3145             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
3146                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
3147                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
3148                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
3149             
3150             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
3151             NEXT_OPCODE(op_put_by_id);
3152         }
3153
3154         case op_init_global_const_nop: {
3155             NEXT_OPCODE(op_init_global_const_nop);
3156         }
3157
3158         case op_init_global_const: {
3159             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3160             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3161             addToGraph(
3162                 PutGlobalVar,
3163                 OpInfo(globalObject->assertVariableIsInThisObject(currentInstruction[1].u.variablePointer)),
3164                 weakJSConstant(globalObject), value);
3165             NEXT_OPCODE(op_init_global_const);
3166         }
3167
3168         case op_profile_type: {
3169             Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
3170             addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
3171             NEXT_OPCODE(op_profile_type);
3172         }
3173
3174         case op_profile_control_flow: {
3175             BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
3176             addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
3177             NEXT_OPCODE(op_profile_control_flow);
3178         }
3179
3180         // === Block terminators. ===
3181
3182         case op_jmp: {
3183             int relativeOffset = currentInstruction[1].u.operand;
3184             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
3185             if (relativeOffset <= 0)
3186                 flushForTerminal();
3187             LAST_OPCODE(op_jmp);
3188         }
3189
3190         case op_jtrue: {
3191             unsigned relativeOffset = currentInstruction[2].u.operand;
3192             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
3193             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
3194             LAST_OPCODE(op_jtrue);
3195         }
3196
3197         case op_jfalse: {
3198             unsigned relativeOffset = currentInstruction[2].u.operand;
3199             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
3200             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
3201             LAST_OPCODE(op_jfalse);
3202         }
3203
3204         case op_jeq_null: {
3205             unsigned relativeOffset = currentInstruction[2].u.operand;
3206             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
3207             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
3208             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
3209             LAST_OPCODE(op_jeq_null);
3210         }
3211
3212         case op_jneq_null: {
3213             unsigned relativeOffset = currentInstruction[2].u.operand;
3214             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
3215             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
3216             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
3217             LAST_OPCODE(op_jneq_null);
3218         }
3219
3220         case op_jless: {
3221             unsigned relativeOffset = currentInstruction[3].u.operand;
3222             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3223             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3224             Node* condition = addToGraph(CompareLess, op1, op2);
3225             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
3226             LAST_OPCODE(op_jless);
3227         }
3228
3229         case op_jlesseq: {
3230             unsigned relativeOffset = currentInstruction[3].u.operand;
3231             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3232             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3233             Node* condition = addToGraph(CompareLessEq, op1, op2);
3234             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
3235             LAST_OPCODE(op_jlesseq);
3236         }
3237
3238         case op_jgreater: {
3239             unsigned relativeOffset = currentInstruction[3].u.operand;
3240             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3241             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3242             Node* condition = addToGraph(CompareGreater, op1, op2);
3243             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
3244             LAST_OPCODE(op_jgreater);
3245         }
3246
3247         case op_jgreatereq: {
3248             unsigned relativeOffset = currentInstruction[3].u.operand;
3249             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3250             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3251             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3252             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
3253             LAST_OPCODE(op_jgreatereq);
3254         }
3255
3256         case op_jnless: {
3257             unsigned relativeOffset = currentInstruction[3].u.operand;
3258             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3259             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3260             Node* condition = addToGraph(CompareLess, op1, op2);
3261             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
3262             LAST_OPCODE(op_jnless);
3263         }
3264
3265         case op_jnlesseq: {
3266             unsigned relativeOffset = currentInstruction[3].u.operand;
3267             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3268             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3269             Node* condition = addToGraph(CompareLessEq, op1, op2);
3270             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
3271             LAST_OPCODE(op_jnlesseq);
3272         }
3273
3274         case op_jngreater: {
3275             unsigned relativeOffset = currentInstruction[3].u.operand;
3276             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3277             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3278             Node* condition = addToGraph(CompareGreater, op1, op2);
3279             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
3280             LAST_OPCODE(op_jngreater);
3281         }
3282
3283         case op_jngreatereq: {
3284             unsigned relativeOffset = currentInstruction[3].u.operand;
3285             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3286             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3287             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3288             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
3289             LAST_OPCODE(op_jngreatereq);
3290         }
3291             
3292         case op_switch_imm: {
3293             SwitchData& data = *m_graph.m_switchData.add();
3294             data.kind = SwitchImm;
3295             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3296             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3297             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
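            // Turn the bytecode jump table into explicit switch cases, skipping holes (zero
            // offsets) and entries that would just branch to the fall-through target.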
3298             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3299                 if (!table.branchOffsets[i])
3300                     continue;
3301                 unsigned target = m_currentIndex + table.branchOffsets[i];
3302                 if (target == data.fallThrough.bytecodeIndex())
3303                     continue;
3304                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
3305             }
3306             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3307             flushIfTerminal(data);
3308             LAST_OPCODE(op_switch_imm);
3309         }
3310             
3311         case op_switch_char: {
3312             SwitchData& data = *m_graph.m_switchData.add();
3313             data.kind = SwitchChar;
3314             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3315             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3316             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3317             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3318                 if (!table.branchOffsets[i])
3319                     continue;
3320                 unsigned target = m_currentIndex + table.branchOffsets[i];
3321                 if (target == data.fallThrough.bytecodeIndex())
3322                     continue;
3323                 data.cases.append(
3324                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
3325             }
3326             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3327             flushIfTerminal(data);
3328             LAST_OPCODE(op_switch_char);
3329         }
3330
3331         case op_switch_string: {
3332             SwitchData& data = *m_graph.m_switchData.add();
3333             data.kind = SwitchString;
3334             data.switchTableIndex = currentInstruction[1].u.operand;
3335             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3336             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
3337             StringJumpTable::StringOffsetTable::iterator iter;
3338             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
3339             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
3340                 unsigned target = m_currentIndex + iter->value.branchOffset;
3341                 if (target == data.fallThrough.bytecodeIndex())
3342                     continue;
3343                 data.cases.append(
3344                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
3345             }
3346             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3347             flushIfTerminal(data);
3348             LAST_OPCODE(op_switch_string);
3349         }
3350
3351         case op_ret:
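            // A return from inlined code does not emit Return. Instead we store the result into
            // the caller's return-value register and set up the control flow and block linking
            // needed to continue at the call site.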
3352             if (inlineCallFrame()) {
3353                 flushForReturn();
3354                 if (m_inlineStackTop->m_returnValue.isValid())
3355                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
3356                 m_inlineStackTop->m_didReturn = true;
3357                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
3358                     // If we're returning from the first block, then we're done parsing.
3359                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
3360                     shouldContinueParsing = false;
3361                     LAST_OPCODE(op_ret);
3362                 } else {
3363                     // If inlining created blocks, and we're doing a return, then we need some
3364                     // special linking.
3365                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3366                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
3367                 }
3368                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
3369                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
3370                     addToGraph(Jump, OpInfo(0));
3371                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
3372                     m_inlineStackTop->m_didEarlyReturn = true;
3373                 }
3374                 LAST_OPCODE(op_ret);
3375             }
3376             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3377             flushForReturn();
3378             LAST_OPCODE(op_ret);
3379             
3380         case op_end:
3381             ASSERT(!inlineCallFrame());
3382             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3383             flushForReturn();
3384             LAST_OPCODE(op_end);
3385
3386         case op_throw:
3387             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
3388             flushForTerminal();
3389             addToGraph(Unreachable);
3390             LAST_OPCODE(op_throw);
3391             
3392         case op_throw_static_error:
3393             addToGraph(ThrowReferenceError);
3394             flushForTerminal();
3395             addToGraph(Unreachable);
3396             LAST_OPCODE(op_throw_static_error);
3397             
3398         case op_call:
3399             handleCall(currentInstruction, Call, CodeForCall);
3400             NEXT_OPCODE(op_call);
3401             
3402         case op_construct:
3403             handleCall(currentInstruction, Construct, CodeForConstruct);
3404             NEXT_OPCODE(op_construct);
3405             
3406         case op_call_varargs: {
3407             handleVarargsCall(currentInstruction, CallVarargs, CodeForCall);
3408             NEXT_OPCODE(op_call_varargs);
3409         }
3410             
3411         case op_construct_varargs: {
3412             handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct);
3413             NEXT_OPCODE(op_construct_varargs);
3414         }
3415             
3416         case op_jneq_ptr:
3417             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
3418             // support simmer for a while before making it more general, since it's
3419             // already gnarly enough as it is.
3420             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
3421             addToGraph(
3422                 CheckCell,
3423                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
3424                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
3425                 get(VirtualRegister(currentInstruction[1].u.operand)));
3426             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
3427             LAST_OPCODE(op_jneq_ptr);
3428
3429         case op_resolve_scope: {
3430             int dst = currentInstruction[1].u.operand;
3431             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
3432             unsigned depth = currentInstruction[5].u.operand;
3433
3434             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
3435             if (needsVarInjectionChecks(resolveType))
3436                 addToGraph(VarInjectionWatchpoint);
3437
3438             switch (resolveType) {
3439             case GlobalProperty:
3440             case GlobalVar:
3441             case GlobalPropertyWithVarInjectionChecks:
3442             case GlobalVarWithVarInjectionChecks:
3443                 set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
3444                 if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks)
3445                     addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
3446                 break;
3447             case LocalClosureVar:
3448             case ClosureVar:
3449             case ClosureVarWithVarInjectionChecks: {
3450                 Node* localBase = get(VirtualRegister(currentInstruction[2].u.operand));
3451                 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
3452                 
3453                 // We have various forms of constant folding here. This is necessary to avoid
3454                 // spurious recompiles in dead-but-foldable code.
3455                 if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
3456                     InferredValue* singleton = symbolTable->singletonScope();
3457                     if (JSValue value = singleton->inferredValue()) {
3458                         m_graph.watchpoints().addLazily(singleton);
3459                         set(VirtualRegister(dst), weakJSConstant(value));
3460                         break;
3461                     }
3462                 }
3463                 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) {
3464                     for (unsigned n = depth; n--;)
3465                         scope = scope->next();
3466                     set(VirtualRegister(dst), weakJSConstant(scope));
3467                     break;
3468                 }
3469                 for (unsigned n = depth; n--;)
3470                     localBase = addToGraph(SkipScope, localBase);
3471                 set(VirtualRegister(dst), localBase);
3472                 break;
3473             }
3474             case Dynamic:
3475                 RELEASE_ASSERT_NOT_REACHED();
3476                 break;
3477             }
3478             NEXT_OPCODE(op_resolve_scope);
3479         }
3480
3481         case op_get_from_scope: {
3482             int dst = currentInstruction[1].u.operand;
3483             int scope = currentInstruction[2].u.operand;
3484             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3485             UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
3486             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3487
3488             Structure* structure = 0;
3489             WatchpointSet* watchpoints = 0;
3490             uintptr_t operand;
3491             {
3492                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3493                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
3494                     watchpoints = currentInstruction[5].u.watchpointSet;
3495                 else
3496                     structure = currentInstruction[5].u.structure.get();
3497                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3498             }
3499
3500             UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it to document that index 5 holds the watchpoint set in GlobalVar mode.
3501
3502             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3503
3504             switch (resolveType) {
3505             case GlobalProperty:
3506             case GlobalPropertyWithVarInjectionChecks: {
3507                 SpeculatedType prediction = getPrediction();
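                // A global property load can be inlined only if the GetByIdStatus is simple and
                // monomorphic; otherwise fall back to a generic GetByIdFlush on the scope, which
                // in this resolve mode is the global object itself.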
3508                 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
3509                 if (status.state() != GetByIdStatus::Simple
3510                     || status.numVariants() != 1
3511                     || status[0].structureSet().size() != 1) {
3512                     set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
3513                     break;
3514                 }
3515                 Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
3516                 addToGraph(Phantom, get(VirtualRegister(scope)));
3517                 set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
3518                 break;
3519             }
3520             case GlobalVar:
3521             case GlobalVarWithVarInjectionChecks: {
3522                 addToGraph(Phantom, get(VirtualRegister(scope)));
3523                 WatchpointSet* watchpointSet;
3524                 ScopeOffset offset;
3525                 {
3526                     ConcurrentJITLocker locker(globalObject->symbolTable()->m_lock);
3527                     SymbolTableEntry entry = globalObject->symbolTable()->get(locker, uid);
3528                     watchpointSet = entry.watchpointSet();
3529                     offset = entry.scopeOffset();
3530                 }
3531                 if (watchpointSet && watchpointSet->state() == IsWatched) {
3532                     // This has a fun concurrency story. There is the possibility of a race in two
3533                     // directions:
3534                     //
3535                     // We see that the set IsWatched, but in the meantime it gets invalidated: this is
3536                     // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
3537                     // invalidated, then this compilation is invalidated. Note that in the meantime we
3538                     // may load an absurd value from the global object. It's fine to load an absurd
3539                     // value if the compilation is invalidated anyway.
3540                     //
3541                     // We see that the set IsWatched, but the value isn't yet initialized: this isn't
3542                     // possible because of the ordering of operations.
3543                     //
3544                     // Here's how we order operations:
3545                     //
3546                     // Main thread stores to the global object: always store a value first, and only
3547                     // after that do we touch the watchpoint set. The touch includes a fence that
3548                     // ensures the store to the global object always happens before the touch on the
3549                     // set.
3550                     //
3551                     // Compilation thread: always first load the state of the watchpoint set, and then
3552                     // load the value. The WatchpointSet::state() method does fences for us to ensure
3553                     // that the load of the state happens before our load of the value.
3554                     //
3555                     // Finalizing compilation: this happens on the main thread and synchronously checks
3556                     // validity of all watchpoint sets.
3557                     //
3558                     // We will only perform optimizations if the load of the state yields IsWatched. That
3559                     // means that at least one store would have happened to initialize the original value
3560                     // of the variable (that is, the value we'd like to constant fold to). There may be
3561                     // other stores that happen after that, but those stores will invalidate the
3562                     // watchpoint set and also the compilation.
3563                     
3564                     // Note that we need to use the operand, which is a direct pointer to the global,
3565                     // rather than looking up the global by doing variableAt(offset). That's because the
3566                     // internal data structures of JSSegmentedVariableObject are not thread-safe even
3567                     // though accessing the global itself is. The segmentation involves a vector spine
3568                     // that resizes with malloc/free, so if new globals unrelated to the one we are
3569                     // reading are added, we might access freed memory if we do variableAt().
3570                     WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);
3571                     
3572                     ASSERT(globalObject->findVariableIndex(pointer) == offset);
3573                     
3574