1  /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "CallLinkStatus.h"
33 #include "CodeBlock.h"
34 #include "CodeBlockWithJITType.h"
35 #include "DFGArrayMode.h"
36 #include "DFGCapabilities.h"
37 #include "DFGJITCode.h"
38 #include "GetByIdStatus.h"
39 #include "Heap.h"
40 #include "JSActivation.h"
41 #include "JSCInlines.h"
42 #include "PreciseJumpTargets.h"
43 #include "PutByIdStatus.h"
44 #include "StackAlignment.h"
45 #include "StringConstructor.h"
46 #include <wtf/CommaPrinter.h>
47 #include <wtf/HashMap.h>
48 #include <wtf/MathExtras.h>
49 #include <wtf/StdLibExtras.h>
50
51 namespace JSC { namespace DFG {
52
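// Identifies a constant buffer by (owning CodeBlock, buffer index). It is hashable so that it
// can serve as the key of the m_constantBufferCache map declared in ByteCodeParser below; the
// hash-table deleted value is encoded as a null CodeBlock with a non-zero index.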
53 class ConstantBufferKey {
54 public:
55     ConstantBufferKey()
56         : m_codeBlock(0)
57         , m_index(0)
58     {
59     }
60     
61     ConstantBufferKey(WTF::HashTableDeletedValueType)
62         : m_codeBlock(0)
63         , m_index(1)
64     {
65     }
66     
67     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
68         : m_codeBlock(codeBlock)
69         , m_index(index)
70     {
71     }
72     
73     bool operator==(const ConstantBufferKey& other) const
74     {
75         return m_codeBlock == other.m_codeBlock
76             && m_index == other.m_index;
77     }
78     
79     unsigned hash() const
80     {
81         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
82     }
83     
84     bool isHashTableDeletedValue() const
85     {
86         return !m_codeBlock && m_index;
87     }
88     
89     CodeBlock* codeBlock() const { return m_codeBlock; }
90     unsigned index() const { return m_index; }
91     
92 private:
93     CodeBlock* m_codeBlock;
94     unsigned m_index;
95 };
96
97 struct ConstantBufferKeyHash {
98     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
99     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
100     {
101         return a == b;
102     }
103     
104     static const bool safeToCompareToEmptyOrDeleted = true;
105 };
106
107 } } // namespace JSC::DFG
108
109 namespace WTF {
110
111 template<typename T> struct DefaultHash;
112 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
113     typedef JSC::DFG::ConstantBufferKeyHash Hash;
114 };
115
116 template<typename T> struct HashTraits;
117 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
118
119 } // namespace WTF
120
121 namespace JSC { namespace DFG {
122
123 // === ByteCodeParser ===
124 //
125 // This class is used to compile the dataflow graph from a CodeBlock.
126 class ByteCodeParser {
127 public:
128     ByteCodeParser(Graph& graph)
129         : m_vm(&graph.m_vm)
130         , m_codeBlock(graph.m_codeBlock)
131         , m_profiledBlock(graph.m_profiledBlock)
132         , m_graph(graph)
133         , m_currentBlock(0)
134         , m_currentIndex(0)
135         , m_constantUndefined(graph.freeze(jsUndefined()))
136         , m_constantNull(graph.freeze(jsNull()))
137         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
138         , m_constantOne(graph.freeze(jsNumber(1)))
139         , m_numArguments(m_codeBlock->numParameters())
140         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
141         , m_parameterSlots(0)
142         , m_numPassedVarArgs(0)
143         , m_inlineStackTop(0)
144         , m_haveBuiltOperandMaps(false)
145         , m_currentInstruction(0)
146     {
147         ASSERT(m_profiledBlock);
148     }
149     
150     // Parse a full CodeBlock of bytecode.
151     bool parse();
152     
153 private:
154     struct InlineStackEntry;
155
156     // Just parse from m_currentIndex to the end of the current CodeBlock.
157     void parseCodeBlock();
158     
159     void ensureLocals(unsigned newNumLocals)
160     {
161         if (newNumLocals <= m_numLocals)
162             return;
163         m_numLocals = newNumLocals;
164         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
165             m_graph.block(i)->ensureLocals(newNumLocals);
166     }
167
168     // Helper for min and max.
169     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
170     
171     // Handle calls. This resolves issues surrounding inlining and intrinsics.
172     void handleCall(
173         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
174         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
175         SpeculatedType prediction);
176     void handleCall(
177         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
178         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
179     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
180     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
181     void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
182     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
183     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
184     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind);
185     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
186     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
187     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
188     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
189     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
190     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
191     void handleGetById(
192         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
193         const GetByIdStatus&);
194     void emitPutById(
195         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
196     void handlePutById(
197         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
198         bool isDirect);
199     void emitChecks(const ConstantStructureCheckVector&);
200
201     Node* getScope(bool skipTop, unsigned skipCount);
202     
203     // Prepare to parse a block.
204     void prepareToParseBlock();
205     // Parse a single basic block of bytecode instructions.
206     bool parseBlock(unsigned limit);
207     // Link block successors.
208     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
209     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
210     
211     VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
212     {
213         ASSERT(!operand.isConstant());
214         
215         m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
216         return &m_graph.m_variableAccessData.last();
217     }
218     
219     // Get/Set the operands/result of a bytecode instruction.
220     Node* getDirect(VirtualRegister operand)
221     {
222         ASSERT(!operand.isConstant());
223
224         // Is this an argument?
225         if (operand.isArgument())
226             return getArgument(operand);
227
228         // Must be a local.
229         return getLocal(operand);
230     }
231
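    // get() resolves a bytecode operand to a Node. Constant registers are materialized as
    // JSConstant nodes and cached in m_constants, so each constant is added to the graph only
    // once. The Callee and ScopeChain header slots are special-cased: when inlining a
    // non-closure call they fold to known constants, and outside of inlining they become
    // GetCallee / GetMyScope nodes. Everything else is remapped through the inline stack and
    // handled by getDirect().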
232     Node* get(VirtualRegister operand)
233     {
234         if (operand.isConstant()) {
235             unsigned constantIndex = operand.toConstantIndex();
236             unsigned oldSize = m_constants.size();
237             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
238                 JSValue value = m_inlineStackTop->m_codeBlock->getConstant(operand.offset());
239                 if (constantIndex >= oldSize) {
240                     m_constants.grow(constantIndex + 1);
241                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
242                         m_constants[i] = nullptr;
243                 }
244                 m_constants[constantIndex] =
245                     addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
246             }
247             ASSERT(m_constants[constantIndex]);
248             return m_constants[constantIndex];
249         }
250         
251         if (inlineCallFrame()) {
252             if (!inlineCallFrame()->isClosureCall) {
253                 JSFunction* callee = inlineCallFrame()->calleeConstant();
254                 if (operand.offset() == JSStack::Callee)
255                     return weakJSConstant(callee);
256                 if (operand.offset() == JSStack::ScopeChain)
257                     return weakJSConstant(callee->scope());
258             }
259         } else if (operand.offset() == JSStack::Callee)
260             return addToGraph(GetCallee);
261         else if (operand.offset() == JSStack::ScopeChain)
262             return addToGraph(GetMyScope);
263         
264         return getDirect(m_inlineStackTop->remapOperand(operand));
265     }
266     
267     enum SetMode {
268         // A normal set which follows a two-phase commit that spans code origins. During
269         // the current code origin it issues a MovHint, and at the start of the next
270         // code origin there will be a SetLocal. If the local needs flushing, the second
271         // SetLocal will be preceded with a Flush.
272         NormalSet,
273         
274         // A set where the SetLocal happens immediately and there is still a Flush. This
275         // is relevant when assigning to a local in tricky situations for the delayed
276         // SetLocal logic but where we know that we have not performed any side effects
277         // within this code origin. This is a safe replacement for NormalSet anytime we
278         // know that we have not yet performed side effects in this code origin.
279         ImmediateSetWithFlush,
280         
281         // A set where the SetLocal happens immediately and we do not Flush it even if
282         // this is a local that is marked as needing it. This is relevant when
283         // initializing locals at the top of a function.
284         ImmediateNakedSet
285     };
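    // A minimal sketch of what NormalSet means in practice: lowering a bytecode store first
    // emits a MovHint for the target operand and then queues a DelayedSetLocal (see setDirect()
    // below); the queued SetLocal, preceded by a Flush if the local needs one, is only emitted
    // once parsing reaches the next code origin. The Immediate modes bypass the queue and call
    // DelayedSetLocal::execute() right away.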
286     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
287     {
288         addToGraph(MovHint, OpInfo(operand.offset()), value);
289         
290         DelayedSetLocal delayed = DelayedSetLocal(operand, value);
291         
292         if (setMode == NormalSet) {
293             m_setLocalQueue.append(delayed);
294             return 0;
295         }
296         
297         return delayed.execute(this, setMode);
298     }
299
300     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
301     {
302         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
303     }
304     
305     Node* injectLazyOperandSpeculation(Node* node)
306     {
307         ASSERT(node->op() == GetLocal);
308         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
309         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
310         LazyOperandValueProfileKey key(m_currentIndex, node->local());
311         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
312         node->variableAccessData()->predict(prediction);
313         return node;
314     }
315
316     // Used in implementing get/set, above, where the operand is a local variable.
317     Node* getLocal(VirtualRegister operand)
318     {
319         unsigned local = operand.toLocal();
320
321         if (local < m_localWatchpoints.size()) {
322             if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
323                 if (JSValue value = set->inferredValue()) {
324                     addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
325                     addToGraph(VariableWatchpoint, OpInfo(set));
326                     return weakJSConstant(value);
327                 }
328             }
329         }
330
331         Node* node = m_currentBlock->variablesAtTail.local(local);
332         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
333         
334         // This has two goals: 1) link together variable access datas, and 2)
335         // try to avoid creating redundant GetLocals. (1) is required for
336         // correctness - no other phase will ensure that block-local variable
337         // access data unification is done correctly. (2) is purely opportunistic
338         // and is meant as a compile-time optimization only.
339         
340         VariableAccessData* variable;
341         
342         if (node) {
343             variable = node->variableAccessData();
344             variable->mergeIsCaptured(isCaptured);
345             
346             if (!isCaptured) {
347                 switch (node->op()) {
348                 case GetLocal:
349                     return node;
350                 case SetLocal:
351                     return node->child1().node();
352                 default:
353                     break;
354                 }
355             }
356         } else
357             variable = newVariableAccessData(operand, isCaptured);
358         
359         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
360         m_currentBlock->variablesAtTail.local(local) = node;
361         return node;
362     }
363
364     Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
365     {
366         unsigned local = operand.toLocal();
367         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
368         
369         if (setMode != ImmediateNakedSet) {
370             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
371             if (isCaptured || argumentPosition)
372                 flushDirect(operand, argumentPosition);
373         }
374
375         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
376         variableAccessData->mergeStructureCheckHoistingFailed(
377             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
378         variableAccessData->mergeCheckArrayHoistingFailed(
379             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
380         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
381         m_currentBlock->variablesAtTail.local(local) = node;
382         return node;
383     }
384
385     // Used in implementing get/set, above, where the operand is an argument.
386     Node* getArgument(VirtualRegister operand)
387     {
388         unsigned argument = operand.toArgument();
389         ASSERT(argument < m_numArguments);
390         
391         Node* node = m_currentBlock->variablesAtTail.argument(argument);
392         bool isCaptured = m_codeBlock->isCaptured(operand);
393
394         VariableAccessData* variable;
395         
396         if (node) {
397             variable = node->variableAccessData();
398             variable->mergeIsCaptured(isCaptured);
399             
400             switch (node->op()) {
401             case GetLocal:
402                 return node;
403             case SetLocal:
404                 return node->child1().node();
405             default:
406                 break;
407             }
408         } else
409             variable = newVariableAccessData(operand, isCaptured);
410         
411         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
412         m_currentBlock->variablesAtTail.argument(argument) = node;
413         return node;
414     }
415     Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
416     {
417         unsigned argument = operand.toArgument();
418         ASSERT(argument < m_numArguments);
419         
420         bool isCaptured = m_codeBlock->isCaptured(operand);
421
422         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
423
424         // Always flush arguments, except for 'this'. If 'this' is created by us,
425         // then make sure that it's never unboxed.
426         if (argument) {
427             if (setMode != ImmediateNakedSet)
428                 flushDirect(operand);
429         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
430             variableAccessData->mergeShouldNeverUnbox(true);
431         
432         variableAccessData->mergeStructureCheckHoistingFailed(
433             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
434         variableAccessData->mergeCheckArrayHoistingFailed(
435             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
436         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
437         m_currentBlock->variablesAtTail.argument(argument) = node;
438         return node;
439     }
440     
441     ArgumentPosition* findArgumentPositionForArgument(int argument)
442     {
443         InlineStackEntry* stack = m_inlineStackTop;
444         while (stack->m_inlineCallFrame)
445             stack = stack->m_caller;
446         return stack->m_argumentPositions[argument];
447     }
448     
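    // Walks the inline stack to check whether this local is really an argument slot of some
    // enclosing inline call frame (the 'this' slot does not count). If it is, returns that
    // frame's ArgumentPosition; otherwise returns null.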
449     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
450     {
451         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
452             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
453             if (!inlineCallFrame)
454                 break;
455             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
456                 continue;
457             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
458                 continue;
459             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
460                 continue;
461             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
462             return stack->m_argumentPositions[argument];
463         }
464         return 0;
465     }
466     
467     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
468     {
469         if (operand.isArgument())
470             return findArgumentPositionForArgument(operand.toArgument());
471         return findArgumentPositionForLocal(operand);
472     }
473
474     void flush(VirtualRegister operand)
475     {
476         flushDirect(m_inlineStackTop->remapOperand(operand));
477     }
478     
479     void flushDirect(VirtualRegister operand)
480     {
481         flushDirect(operand, findArgumentPosition(operand));
482     }
483     
484     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
485     {
486         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
487         
488         ASSERT(!operand.isConstant());
489         
490         Node* node = m_currentBlock->variablesAtTail.operand(operand);
491         
492         VariableAccessData* variable;
493         
494         if (node) {
495             variable = node->variableAccessData();
496             variable->mergeIsCaptured(isCaptured);
497         } else
498             variable = newVariableAccessData(operand, isCaptured);
499         
500         node = addToGraph(Flush, OpInfo(variable));
501         m_currentBlock->variablesAtTail.operand(operand) = node;
502         if (argumentPosition)
503             argumentPosition->addVariable(variable);
504     }
505     
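    // Flushes, for one inline stack entry, the Callee and ScopeChain slots (closure calls
    // only), every argument except 'this', and every captured local of the code block.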
506     void flush(InlineStackEntry* inlineStackEntry)
507     {
508         int numArguments;
509         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
510             numArguments = inlineCallFrame->arguments.size();
511             if (inlineCallFrame->isClosureCall) {
512                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
513                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
514             }
515         } else
516             numArguments = inlineStackEntry->m_codeBlock->numParameters();
517         for (unsigned argument = numArguments; argument-- > 1;)
518             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
519         for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
520             if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
521                 continue;
522             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
523         }
524     }
525
526     void flushForTerminal()
527     {
528         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
529             flush(inlineStackEntry);
530     }
531
532     void flushForReturn()
533     {
534         flush(m_inlineStackTop);
535     }
536     
537     void flushIfTerminal(SwitchData& data)
538     {
539         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
540             return;
541         
542         for (unsigned i = data.cases.size(); i--;) {
543             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
544                 return;
545         }
546         
547         flushForTerminal();
548     }
549
550     // Assumes that the constant should be strongly marked.
551     Node* jsConstant(JSValue constantValue)
552     {
553         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
554     }
555
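    // The weak counterpart of jsConstant(): the value is frozen but not strongly marked, so
    // (presumably) the compilation holds it only as a weak reference.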
556     Node* weakJSConstant(JSValue constantValue)
557     {
558         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
559     }
560
561     // Helper functions to get/set the this value.
562     Node* getThis()
563     {
564         return get(m_inlineStackTop->m_codeBlock->thisRegister());
565     }
566
567     void setThis(Node* value)
568     {
569         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
570     }
571
572     InlineCallFrame* inlineCallFrame()
573     {
574         return m_inlineStackTop->m_inlineCallFrame;
575     }
576
577     CodeOrigin currentCodeOrigin()
578     {
579         return CodeOrigin(m_currentIndex, inlineCallFrame());
580     }
581     
582     BranchData* branchData(unsigned taken, unsigned notTaken)
583     {
584         // We assume that branches originating from bytecode always have a fall-through. We
585         // use this assumption to avoid checking for the creation of terminal blocks.
586         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
587         BranchData* data = m_graph.m_branchData.add();
588         *data = BranchData::withBytecodeIndices(taken, notTaken);
589         return data;
590     }
591     
592     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
593     {
594         Node* result = m_graph.addNode(
595             SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
596             Edge(child3));
597         ASSERT(op != Phi);
598         m_currentBlock->append(result);
599         return result;
600     }
601     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
602     {
603         Node* result = m_graph.addNode(
604             SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
605         ASSERT(op != Phi);
606         m_currentBlock->append(result);
607         return result;
608     }
609     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
610     {
611         Node* result = m_graph.addNode(
612             SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
613             Edge(child3));
614         ASSERT(op != Phi);
615         m_currentBlock->append(result);
616         return result;
617     }
618     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
619     {
620         Node* result = m_graph.addNode(
621             SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
622             Edge(child1), Edge(child2), Edge(child3));
623         ASSERT(op != Phi);
624         m_currentBlock->append(result);
625         return result;
626     }
627     
628     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
629     {
630         Node* result = m_graph.addNode(
631             SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
632             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
633         ASSERT(op != Phi);
634         m_currentBlock->append(result);
635         
636         m_numPassedVarArgs = 0;
637         
638         return result;
639     }
640
641     void addVarArgChild(Node* child)
642     {
643         m_graph.m_varArgChildren.append(Edge(child));
644         m_numPassedVarArgs++;
645     }
646     
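    // Builds the vararg children for a call node: the callee first, then the arguments read
    // from the outgoing argument slots at registerOffset. Construct-style ops skip the first
    // (this) argument, hence dummyThisArgument. m_parameterSlots is also grown so that enough
    // stack is preallocated for the largest outgoing call frame (see the m_parameterSlots
    // comment further down).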
647     Node* addCallWithoutSettingResult(
648         NodeType op, Node* callee, int argCount, int registerOffset,
649         SpeculatedType prediction)
650     {
651         addVarArgChild(callee);
652         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
653         if (parameterSlots > m_parameterSlots)
654             m_parameterSlots = parameterSlots;
655
656         int dummyThisArgument = op == Call || op == NativeCall ? 0 : 1;
657         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
658             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
659
660         return addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
661     }
662     
663     Node* addCall(
664         int result, NodeType op, Node* callee, int argCount, int registerOffset,
665         SpeculatedType prediction)
666     {
667         Node* call = addCallWithoutSettingResult(
668             op, callee, argCount, registerOffset, prediction);
669         VirtualRegister resultReg(result);
670         if (resultReg.isValid())
671             set(VirtualRegister(result), call);
672         return call;
673     }
674     
675     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
676     {
677         Node* objectNode = weakJSConstant(object);
678         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
679         return objectNode;
680     }
681     
682     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
683     {
684         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
685         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
686     }
687
688     SpeculatedType getPrediction(unsigned bytecodeIndex)
689     {
690         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
691         
692         if (prediction == SpecNone) {
693             // We have no information about what values this node generates. Give up
694             // on executing this code, since we're likely to do more damage than good.
695             addToGraph(ForceOSRExit);
696         }
697         
698         return prediction;
699     }
700     
701     SpeculatedType getPredictionWithoutOSRExit()
702     {
703         return getPredictionWithoutOSRExit(m_currentIndex);
704     }
705     
706     SpeculatedType getPrediction()
707     {
708         return getPrediction(m_currentIndex);
709     }
710     
711     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
712     {
713         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
714         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
715         return ArrayMode::fromObserved(locker, profile, action, false);
716     }
717     
718     ArrayMode getArrayMode(ArrayProfile* profile)
719     {
720         return getArrayMode(profile, Array::Read);
721     }
722     
723     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
724     {
725         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
726         
727         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
728         
729         bool makeSafe =
730             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
731             || profile->outOfBounds(locker);
732         
733         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
734         
735         return result;
736     }
737     
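    // makeSafe() decorates an arithmetic node with overflow / negative-zero flags, based on
    // OSR exit sites recorded at this bytecode index and on the baseline JIT's slow-case
    // counters. Later phases presumably consult these flags when deciding how aggressively to
    // speculate on integer arithmetic.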
738     Node* makeSafe(Node* node)
739     {
740         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
741             node->mergeFlags(NodeMayOverflowInDFG);
742         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
743             node->mergeFlags(NodeMayNegZeroInDFG);
744         
745         if (!isX86() && node->op() == ArithMod)
746             return node;
747
748         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
749             return node;
750         
751         switch (node->op()) {
752         case UInt32ToNumber:
753         case ArithAdd:
754         case ArithSub:
755         case ValueAdd:
756         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
757             node->mergeFlags(NodeMayOverflowInBaseline);
758             break;
759             
760         case ArithNegate:
761             // Currently we can't tell the difference between a negation overflowing
762             // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the slow
763             // path then we assume that it did both of those things.
764             node->mergeFlags(NodeMayOverflowInBaseline);
765             node->mergeFlags(NodeMayNegZeroInBaseline);
766             break;
767
768         case ArithMul:
769             // FIXME: We should detect cases where we only overflowed but never created
770             // negative zero.
771             // https://bugs.webkit.org/show_bug.cgi?id=132470
772             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
773                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
774                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
775             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
776                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
777                 node->mergeFlags(NodeMayNegZeroInBaseline);
778             break;
779             
780         default:
781             RELEASE_ASSERT_NOT_REACHED();
782             break;
783         }
784         
785         return node;
786     }
787     
788     Node* makeDivSafe(Node* node)
789     {
790         ASSERT(node->op() == ArithDiv);
791         
792         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
793             node->mergeFlags(NodeMayOverflowInDFG);
794         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
795             node->mergeFlags(NodeMayNegZeroInDFG);
796         
797         // The main slow case counter for op_div in the old JIT counts only when
798         // the operands are not numbers. We don't care about that since we already
799         // have speculations in place that take care of that separately. We only
800         // care about when the outcome of the division is not an integer, which
801         // is what the special fast case counter tells us.
802         
803         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
804             return node;
805         
806         // FIXME: It might be possible to make this more granular.
807         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
808         
809         return node;
810     }
811     
812     void buildOperandMapsIfNecessary();
813     
814     VM* m_vm;
815     CodeBlock* m_codeBlock;
816     CodeBlock* m_profiledBlock;
817     Graph& m_graph;
818
819     // The current block being generated.
820     BasicBlock* m_currentBlock;
821     // The bytecode index of the current instruction being generated.
822     unsigned m_currentIndex;
823
824     FrozenValue* m_constantUndefined;
825     FrozenValue* m_constantNull;
826     FrozenValue* m_constantNaN;
827     FrozenValue* m_constantOne;
828     Vector<Node*, 16> m_constants;
829
830     // The number of arguments passed to the function.
831     unsigned m_numArguments;
832     // The number of locals (vars + temporaries) used in the function.
833     unsigned m_numLocals;
834     // The number of slots (in units of sizeof(Register)) that we need to
835     // preallocate for arguments to outgoing calls from this frame. This
836     // number includes the CallFrame slots that we initialize for the callee
837     // (but not the callee-initialized CallerFrame and ReturnPC slots).
838     // This number is 0 if and only if this function is a leaf.
839     unsigned m_parameterSlots;
840     // The number of var args passed to the next var arg node.
841     unsigned m_numPassedVarArgs;
842
843     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
844     
845     Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
846     
847     struct InlineStackEntry {
848         ByteCodeParser* m_byteCodeParser;
849         
850         CodeBlock* m_codeBlock;
851         CodeBlock* m_profiledBlock;
852         InlineCallFrame* m_inlineCallFrame;
853         
854         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
855         
856         QueryableExitProfile m_exitProfile;
857         
858         // Remapping of identifier and constant numbers from the code block being
859         // inlined (inline callee) to the code block that we're inlining into
860         // (the machine code block, which is the transitive, though not necessarily
861         // direct, caller).
862         Vector<unsigned> m_identifierRemap;
863         Vector<unsigned> m_constantBufferRemap;
864         Vector<unsigned> m_switchRemap;
865         
866         // Blocks introduced by this code block, which need successor linking.
867         // May include up to one basic block that includes the continuation after
868         // the callsite in the caller. These must be appended in the order that they
869         // are created, but their bytecodeBegin values need not be in order as they
870         // are ignored.
871         Vector<UnlinkedBlock> m_unlinkedBlocks;
872         
873         // Potential block linking targets. Must be sorted by bytecodeBegin, and
874         // cannot have two blocks that have the same bytecodeBegin. For this very
875         // reason, this is not equivalent to 
876         Vector<BasicBlock*> m_blockLinkingTargets;
877         
878         // If the callsite's basic block was split into two, then this will be
879         // the head of the callsite block. It needs its successors linked to the
880         // m_unlinkedBlocks, but not the other way around: there's no way for
881         // any blocks in m_unlinkedBlocks to jump back into this block.
882         BasicBlock* m_callsiteBlockHead;
883         
884         // Does the callsite block head need linking? This is typically true
885         // but will be false for the machine code block's inline stack entry
886         // (since that one is not inlined) and for cases where an inline callee
887         // did the linking for us.
888         bool m_callsiteBlockHeadNeedsLinking;
889         
890         VirtualRegister m_returnValue;
891         
892         // Speculations about variable types collected from the profiled code block,
893         // which are based on OSR exit profiles that past DFG compilations of this
894         // code block had gathered.
895         LazyOperandValueProfileParser m_lazyOperands;
896         
897         CallLinkInfoMap m_callLinkInfos;
898         StubInfoMap m_stubInfos;
899         
900         // Did we see any returns? We need to handle the (uncommon but necessary)
901         // case where a procedure that does not return was inlined.
902         bool m_didReturn;
903         
904         // Did we have any early returns?
905         bool m_didEarlyReturn;
906         
907         // Pointers to the argument position trackers for this slice of code.
908         Vector<ArgumentPosition*> m_argumentPositions;
909         
910         InlineStackEntry* m_caller;
911         
912         InlineStackEntry(
913             ByteCodeParser*,
914             CodeBlock*,
915             CodeBlock* profiledBlock,
916             BasicBlock* callsiteBlockHead,
917             JSFunction* callee, // Null if this is a closure call.
918             VirtualRegister returnValueVR,
919             VirtualRegister inlineCallFrameStart,
920             int argumentCountIncludingThis,
921             InlineCallFrame::Kind);
922         
923         ~InlineStackEntry()
924         {
925             m_byteCodeParser->m_inlineStackTop = m_caller;
926         }
927         
928         VirtualRegister remapOperand(VirtualRegister operand) const
929         {
930             if (!m_inlineCallFrame)
931                 return operand;
932             
933             ASSERT(!operand.isConstant());
934
935             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
936         }
937     };
938     
939     InlineStackEntry* m_inlineStackTop;
940     
941     struct DelayedSetLocal {
942         VirtualRegister m_operand;
943         Node* m_value;
944         
945         DelayedSetLocal() { }
946         DelayedSetLocal(VirtualRegister operand, Node* value)
947             : m_operand(operand)
948             , m_value(value)
949         {
950         }
951         
952         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
953         {
954             if (m_operand.isArgument())
955                 return parser->setArgument(m_operand, m_value, setMode);
956             return parser->setLocal(m_operand, m_value, setMode);
957         }
958     };
959     
960     Vector<DelayedSetLocal, 2> m_setLocalQueue;
961
962     // Have we built operand maps? We initialize them lazily, and only when doing
963     // inlining.
964     bool m_haveBuiltOperandMaps;
965     // Mapping between identifier names and numbers.
966     BorrowedIdentifierMap m_identifierMap;
967     
968     CodeBlock* m_dfgCodeBlock;
969     CallLinkStatus::ContextMap m_callContextMap;
970     StubInfoMap m_dfgStubInfos;
971     
972     Instruction* m_currentInstruction;
973 };
974
975 #define NEXT_OPCODE(name) \
976     m_currentIndex += OPCODE_LENGTH(name); \
977     continue
978
979 #define LAST_OPCODE(name) \
980     m_currentIndex += OPCODE_LENGTH(name); \
981     return shouldContinueParsing
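
// NEXT_OPCODE advances m_currentIndex past the current instruction and continues the enclosing
// dispatch loop; LAST_OPCODE does the same but exits the loop, returning whether parsing of
// the block should continue (see parseBlock()).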
982
983 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
984 {
985     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
986     handleCall(
987         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
988         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
989 }
990
991 void ByteCodeParser::handleCall(
992     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
993     int callee, int argumentCountIncludingThis, int registerOffset)
994 {
995     Node* callTarget = get(VirtualRegister(callee));
996     
997     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
998         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
999         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1000     
1001     handleCall(
1002         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1003         argumentCountIncludingThis, registerOffset, callLinkStatus);
1004 }
1005     
1006 void ByteCodeParser::handleCall(
1007     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1008     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1009     CallLinkStatus callLinkStatus)
1010 {
1011     handleCall(
1012         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1013         registerOffset, callLinkStatus, getPrediction());
1014 }
1015
1016 void ByteCodeParser::handleCall(
1017     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1018     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1019     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1020 {
1021     ASSERT(registerOffset <= 0);
1022     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1023     
1024     if (callTarget->hasConstant())
1025         callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
1026     
1027     if (!callLinkStatus.canOptimize()) {
1028         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1029         // that we cannot optimize them.
1030         
1031         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1032         return;
1033     }
1034     
1035     unsigned nextOffset = m_currentIndex + instructionSize;
1036
1037     if (InternalFunction* function = callLinkStatus.internalFunction()) {
1038         if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, specializationKind)) {
1039             // This phantoming has to be *after* the code for the intrinsic, to signify that
1040             // the inputs must be kept alive whatever exits the intrinsic may do.
1041             addToGraph(Phantom, callTarget);
1042             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1043             return;
1044         }
1045         
1046         // Can only handle this using the generic call handler.
1047         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1048         return;
1049     }
1050         
1051     Intrinsic intrinsic = callLinkStatus.intrinsicFor(specializationKind);
1052
1053     JSFunction* knownFunction = nullptr;
1054     if (intrinsic != NoIntrinsic) {
1055         emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind);
1056             
1057         if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1058             // This phantoming has to be *after* the code for the intrinsic, to signify that
1059             // the inputs must be kept alive whatever exits the intrinsic may do.
1060             addToGraph(Phantom, callTarget);
1061             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1062             if (m_graph.compilation())
1063                 m_graph.compilation()->noticeInlinedCall();
1064             return;
1065         }
1066     } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
1067         if (m_graph.compilation())
1068             m_graph.compilation()->noticeInlinedCall();
1069         return;
1070 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1071     } else if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls()) {
1072         JSFunction* function = callLinkStatus.function();
1073         if (function && function->isHostFunction()) {
1074             emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind);
1075             knownFunction = function;
1076
1077             if (op == Call) 
1078                 op = NativeCall;
1079             else {
1080                 ASSERT(op == Construct);
1081                 op = NativeConstruct;
1082             }
1083         }
1084 #endif
1085     }
1086     Node* call = addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1087
1088     if (knownFunction) 
1089         call->giveKnownFunction(knownFunction);
1090 }
1091
1092 void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
1093 {
1094     Node* thisArgument;
1095     if (kind == CodeForCall)
1096         thisArgument = get(virtualRegisterForArgument(0, registerOffset));
1097     else
1098         thisArgument = 0;
1099
1100     if (callLinkStatus.isProved()) {
1101         addToGraph(Phantom, callTarget, thisArgument);
1102         return;
1103     }
1104     
1105     ASSERT(callLinkStatus.canOptimize());
1106     
1107     if (JSFunction* function = callLinkStatus.function())
1108         addToGraph(CheckFunction, OpInfo(m_graph.freeze(function)), callTarget, thisArgument);
1109     else {
1110         ASSERT(callLinkStatus.structure());
1111         ASSERT(callLinkStatus.executable());
1112         
1113         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
1114         addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
1115     }
1116 }
1117
1118 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
1119 {
1120     for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
1121         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1122 }
1123
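// Decides whether a call site can be inlined and, if so, performs the inlining in place. The
// early checks mirror the dataLog() messages below: we need a real (non-host)
// FunctionExecutable, a matching arity, a baseline code block that the DFG considers
// inlineable, a caller that is not already too large, and acceptable inlining depth and
// recursion.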
1124 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind)
1125 {
1126     static const bool verbose = false;
1127     
1128     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1129     
1130     if (verbose)
1131         dataLog("Considering inlining ", callLinkStatus, " into ", currentCodeOrigin(), "\n");
1132     
1133     // First, the really simple checks: do we have an actual JS function?
1134     if (!callLinkStatus.executable()) {
1135         if (verbose)
1136             dataLog("    Failing because there is no executable.\n");
1137         return false;
1138     }
1139     if (callLinkStatus.executable()->isHostFunction()) {
1140         if (verbose)
1141             dataLog("    Failing because it's a host function.\n");
1142         return false;
1143     }
1144     
1145     FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
1146     
1147     // Does the number of arguments we're passing match the arity of the target? We currently
1148     // inline only if the number of arguments passed is greater than or equal to the number
1149     // of arguments expected.
1150     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1151         if (verbose)
1152             dataLog("    Failing because of arity mismatch.\n");
1153         return false;
1154     }
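    // For example (hypothetical numbers): a callee declared with two parameters can be inlined
    // when called with at least two arguments plus 'this' (argumentCountIncludingThis >= 3); a
    // call passing only one argument fails this check and falls back to planting a real call.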
1155     
1156     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1157     // being an inline candidate? We might not have a code block if code was thrown away or if we
1158     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1159     // if we had a static proof of what was being called; this might happen for example if you call a
1160     // global function, where watchpointing gives us static information. Overall, it's a rare case
1161     // because we expect that any hot callees would have already been compiled.
1162     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1163     if (!codeBlock) {
1164         if (verbose)
1165             dataLog("    Failing because no code block available.\n");
1166         return false;
1167     }
1168     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1169         codeBlock, specializationKind, callLinkStatus.isClosureCall());
1170     if (!canInline(capabilityLevel)) {
1171         if (verbose)
1172             dataLog("    Failing because the function is not inlineable.\n");
1173         return false;
1174     }
1175     
1176     // Check if the caller is already too large. We do this check here because that's just
1177     // where we happen to also have the callee's code block, and we want that for the
1178     // purpose of unsetting SABI (the callee's m_shouldAlwaysBeInlined bit).
1179     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1180         codeBlock->m_shouldAlwaysBeInlined = false;
1181         if (verbose)
1182             dataLog("    Failing because the caller is too large.\n");
1183         return false;
1184     }
1185     
1186     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1187     // this function.
1188     // https://bugs.webkit.org/show_bug.cgi?id=127627
1189     
1190     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1191     // too many levels? If either of these are detected, then don't inline. We adjust our
1192     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1193     
1194     unsigned depth = 0;
1195     unsigned recursion = 0;
1196     
1197     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1198         ++depth;
1199         if (depth >= Options::maximumInliningDepth()) {
1200             if (verbose)
1201                 dataLog("    Failing because depth exceeded.\n");
1202             return false;
1203         }
1204         
1205         if (entry->executable() == executable) {
1206             ++recursion;
1207             if (recursion >= Options::maximumInliningRecursion()) {
1208                 if (verbose)
1209                     dataLog("    Failing because recursion detected.\n");
1210                 return false;
1211             }
1212         }
1213     }
1214     
1215     if (verbose)
1216         dataLog("    Committing to inlining.\n");
1217     
1218     // Now we know without a doubt that we are committed to inlining. So begin the process
1219     // by checking the callee (if necessary) and making sure that arguments and the callee
1220     // are flushed.
1221     emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, specializationKind);
1222     
1223     // FIXME: Don't flush constants!
1224     
1225     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1226     
1227     ensureLocals(
1228         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1229         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1230     
1231     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1232
1233     VirtualRegister resultReg(resultOperand);
1234     if (resultReg.isValid())
1235         resultReg = m_inlineStackTop->remapOperand(resultReg);
1236     
1237     InlineStackEntry inlineStackEntry(
1238         this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(), resultReg,
1239         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1240     
1241     // This is where the actual inlining really happens.
1242     unsigned oldIndex = m_currentIndex;
1243     m_currentIndex = 0;
1244
1245     InlineVariableData inlineVariableData;
1246     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1247     inlineVariableData.argumentPositionStart = argumentPositionStart;
1248     inlineVariableData.calleeVariable = 0;
1249     
1250     RELEASE_ASSERT(
1251         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1252         == callLinkStatus.isClosureCall());
1253     if (callLinkStatus.isClosureCall()) {
1254         VariableAccessData* calleeVariable =
1255             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1256         VariableAccessData* scopeVariable =
1257             set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData();
1258         
1259         calleeVariable->mergeShouldNeverUnbox(true);
1260         scopeVariable->mergeShouldNeverUnbox(true);
1261         
1262         inlineVariableData.calleeVariable = calleeVariable;
1263     }
1264     
1265     m_graph.m_inlineVariableData.append(inlineVariableData);
1266     
1267     parseCodeBlock();
1268     prepareToParseBlock(); // Reset our state now that we're back to the outer code.
1269     
1270     m_currentIndex = oldIndex;
1271     
1272     // If the inlined code created some new basic blocks, then we have linking to do.
1273     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1274         
1275         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1276         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1277             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1278         else
1279             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1280         
1281         // It's possible that the callsite block head is not owned by the caller.
1282         if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1283             // It's definitely owned by the caller, because the caller created new blocks.
1284             // Assert that this all adds up.
1285             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
1286             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1287             inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1288         } else {
1289             // It's definitely not owned by the caller. Tell the caller that it does not
1290             // need to link its callsite block head, because we already did that linking here.
1291             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1292             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1293             inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1294         }
1295         
1296         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1297     } else
1298         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1299     
1300     BasicBlock* lastBlock = m_graph.lastBlock();
1301     // If there was a return, but no early returns, then we're done. We allow parsing of
1302     // the caller to continue in whatever basic block we're in right now.
1303     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1304         ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
1305         
1306         // If we created new blocks then the last block needs linking, but in the
1307         // caller. It doesn't need to be linked to, but it needs outgoing links.
1308         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1309             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1310             // for release builds because this block will never serve as a potential target
1311             // in the linker's binary search.
1312             lastBlock->bytecodeBegin = m_currentIndex;
1313             m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1314         }
1315         
1316         m_currentBlock = m_graph.lastBlock();
1317         return true;
1318     }
1319     
1320     // If we get to this point then every block must end in some sort of terminal.
1321     ASSERT(lastBlock->last()->isTerminal());
1322     
1323
1324     // Need to create a new basic block for the continuation at the caller.
1325     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1326
1327     // Link the early returns to the basic block we're about to create.
1328     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1329         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1330             continue;
1331         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1332         ASSERT(!blockToLink->isLinked);
1333         Node* node = blockToLink->last();
1334         ASSERT(node->op() == Jump);
1335         ASSERT(!node->targetBlock());
1336         node->targetBlock() = block.get();
1337         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1338 #if !ASSERT_DISABLED
1339         blockToLink->isLinked = true;
1340 #endif
1341     }
1342     
1343     m_currentBlock = block.get();
1344     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1345     m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1346     m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1347     m_graph.appendBlock(block);
1348     prepareToParseBlock();
1349     
1350     // At this point we return and continue to generate code for the caller, but
1351     // in the new basic block.
1352     return true;
1353 }
1354
1355 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1356 {
1357     if (argumentCountIncludingThis == 1) { // Math.min() is +Infinity, Math.max() is -Infinity.
1358         set(VirtualRegister(resultOperand), jsConstant(jsNumber(op == ArithMax ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity())));
1359         return true;
1360     }
1361      
1362     if (argumentCountIncludingThis == 2) { // Math.min(x)
1363         Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
1364         addToGraph(Phantom, Edge(result, NumberUse));
1365         set(VirtualRegister(resultOperand), result);
1366         return true;
1367     }
1368     
1369     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1370         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1371         return true;
1372     }
1373     
1374     // Don't handle three or more actual arguments (argumentCountIncludingThis >= 4) for now.
1375     return false;
1376 }
1377
1378 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1379 {
1380     switch (intrinsic) {
1381     case AbsIntrinsic: {
1382         if (argumentCountIncludingThis == 1) { // Math.abs()
1383             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1384             return true;
1385         }
1386
1387         if (!MacroAssembler::supportsFloatingPointAbs())
1388             return false;
1389
1390         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1391         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1392             node->mergeFlags(NodeMayOverflowInDFG);
1393         set(VirtualRegister(resultOperand), node);
1394         return true;
1395     }
1396
1397     case MinIntrinsic:
1398         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1399         
1400     case MaxIntrinsic:
1401         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1402         
1403     case SqrtIntrinsic:
1404     case CosIntrinsic:
1405     case SinIntrinsic: {
1406         if (argumentCountIncludingThis == 1) {
1407             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1408             return true;
1409         }
1410         
1411         switch (intrinsic) {
1412         case SqrtIntrinsic:
1413             if (!MacroAssembler::supportsFloatingPointSqrt())
1414                 return false;
1415             
1416             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1417             return true;
1418             
1419         case CosIntrinsic:
1420             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1421             return true;
1422             
1423         case SinIntrinsic:
1424             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1425             return true;
1426             
1427         default:
1428             RELEASE_ASSERT_NOT_REACHED();
1429             return false;
1430         }
1431     }
1432         
1433     case ArrayPushIntrinsic: {
1434         if (argumentCountIncludingThis != 2)
1435             return false;
1436         
1437         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1438         if (!arrayMode.isJSArray())
1439             return false;
1440         switch (arrayMode.type()) {
1441         case Array::Undecided:
1442         case Array::Int32:
1443         case Array::Double:
1444         case Array::Contiguous:
1445         case Array::ArrayStorage: {
1446             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1447             set(VirtualRegister(resultOperand), arrayPush);
1448             
1449             return true;
1450         }
1451             
1452         default:
1453             return false;
1454         }
1455     }
1456         
1457     case ArrayPopIntrinsic: {
1458         if (argumentCountIncludingThis != 1)
1459             return false;
1460         
1461         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1462         if (!arrayMode.isJSArray())
1463             return false;
1464         switch (arrayMode.type()) {
1465         case Array::Int32:
1466         case Array::Double:
1467         case Array::Contiguous:
1468         case Array::ArrayStorage: {
1469             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1470             set(VirtualRegister(resultOperand), arrayPop);
1471             return true;
1472         }
1473             
1474         default:
1475             return false;
1476         }
1477     }
1478
1479     case CharCodeAtIntrinsic: {
1480         if (argumentCountIncludingThis != 2)
1481             return false;
1482
1483         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1484         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1485         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1486
1487         set(VirtualRegister(resultOperand), charCode);
1488         return true;
1489     }
1490
1491     case CharAtIntrinsic: {
1492         if (argumentCountIncludingThis != 2)
1493             return false;
1494
1495         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1496         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1497         Node* charAt = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1498
1499         set(VirtualRegister(resultOperand), charAt);
1500         return true;
1501     }
1502     case FromCharCodeIntrinsic: {
1503         if (argumentCountIncludingThis != 2)
1504             return false;
1505
1506         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1507         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
1508
1509         set(VirtualRegister(resultOperand), charCode);
1510
1511         return true;
1512     }
1513
1514     case RegExpExecIntrinsic: {
1515         if (argumentCountIncludingThis != 2)
1516             return false;
1517         
1518         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1519         set(VirtualRegister(resultOperand), regExpExec);
1520         
1521         return true;
1522     }
1523         
1524     case RegExpTestIntrinsic: {
1525         if (argumentCountIncludingThis != 2)
1526             return false;
1527         
1528         Node* regExpTest = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1529         set(VirtualRegister(resultOperand), regExpTest);
1530         
1531         return true;
1532     }
1533
1534     case IMulIntrinsic: {
1535         if (argumentCountIncludingThis != 3)
1536             return false;
1537         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
1538         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
1539         Node* left = get(leftOperand);
1540         Node* right = get(rightOperand);
1541         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
1542         return true;
1543     }
1544         
1545     case FRoundIntrinsic: {
1546         if (argumentCountIncludingThis != 2)
1547             return false;
1548         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1549         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
1550         return true;
1551     }
1552         
1553     case DFGTrueIntrinsic: {
1554         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
1555         return true;
1556     }
1557         
1558     case OSRExitIntrinsic: {
1559         addToGraph(ForceOSRExit);
1560         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1561         return true;
1562     }
1563         
1564     case IsFinalTierIntrinsic: {
1565         set(VirtualRegister(resultOperand),
1566             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
1567         return true;
1568     }
1569         
1570     case SetInt32HeapPredictionIntrinsic: {
1571         for (int i = 1; i < argumentCountIncludingThis; ++i) {
1572             Node* node = get(virtualRegisterForArgument(i, registerOffset));
1573             if (node->hasHeapPrediction())
1574                 node->setHeapPrediction(SpecInt32);
1575         }
1576         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1577         return true;
1578     }
1579         
1580     case FiatInt52Intrinsic: {
1581         if (argumentCountIncludingThis != 2)
1582             return false;
1583         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1584         if (enableInt52())
1585             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
1586         else
1587             set(VirtualRegister(resultOperand), get(operand));
1588         return true;
1589     }
1590         
1591     default:
1592         return false;
1593     }
1594 }
1595
1596 bool ByteCodeParser::handleTypedArrayConstructor(
1597     int resultOperand, InternalFunction* function, int registerOffset,
1598     int argumentCountIncludingThis, TypedArrayType type)
1599 {
1600     if (!isTypedView(type))
1601         return false;
1602     
1603     if (function->classInfo() != constructorClassInfoForType(type))
1604         return false;
1605     
1606     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1607         return false;
1608     
1609     // We only have an intrinsic for the case where you say:
1610     //
1611     // new FooArray(blah);
1612     //
1613     // Of course, 'blah' could be any of the following:
1614     //
1615     // - Integer, indicating that you want to allocate an array of that length.
1616     //   This is the thing we're hoping for, and what we can actually do meaningful
1617     //   optimizations for.
1618     //
1619     // - Array buffer, indicating that you want to create a view onto that _entire_
1620     //   buffer.
1621     //
1622     // - Non-buffer object, indicating that you want to create a copy of that
1623     //   object by pretending that it quacks like an array.
1624     //
1625     // - Anything else, indicating that you want to have an exception thrown at
1626     //   you.
1627     //
1628     // The intrinsic, NewTypedArray, will behave as if it could do any of these
1629     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
1630     // predicted Int32, then we lock it in as a normal typed array allocation.
1631     // Otherwise, NewTypedArray turns into a totally opaque function call that
1632     // may clobber the world - by virtue of it accessing properties on what could
1633     // be an object.
1634     //
1635     // Note that although the generic form of NewTypedArray sounds sort of awful,
1636     // it is actually quite likely to be more efficient than a fully generic
1637     // Construct. So, we might want to think about making NewTypedArray variadic,
1638     // or else making Construct not super slow.
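         // As a purely illustrative (hypothetical) JS sketch of the cases above:
         //
         //     new Float64Array(100);          // integer length: the case we can optimize
         //     new Float64Array(arrayBuffer);  // view onto that entire buffer
         //     new Float64Array([1, 2, 3]);    // copy of an array-like object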
1639     
1640     if (argumentCountIncludingThis != 2)
1641         return false;
1642     
1643     set(VirtualRegister(resultOperand),
1644         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
1645     return true;
1646 }
1647
1648 bool ByteCodeParser::handleConstantInternalFunction(
1649     int resultOperand, InternalFunction* function, int registerOffset,
1650     int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
1651 {
1652     // If we ever find that we have a lot of internal functions that we specialize for,
1653     // then we should probably have some sort of hashtable dispatch, or maybe even
1654     // dispatch straight through the MethodTable of the InternalFunction. But for now,
1655     // it seems that this case is hit infrequently enough, and the number of functions
1656     // we know about is small enough, that having just a linear cascade of if statements
1657     // is good enough.
1658     
1659     UNUSED_PARAM(prediction); // Remove this once we do more things.
1660     
1661     if (function->classInfo() == ArrayConstructor::info()) {
1662         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1663             return false;
1664         
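             // With exactly one argument we emit NewArrayWithSize (the new Array(length) case);
             // any other arity allocates an array containing the arguments themselves.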
1665         if (argumentCountIncludingThis == 2) {
1666             set(VirtualRegister(resultOperand),
1667                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
1668             return true;
1669         }
1670         
1671         for (int i = 1; i < argumentCountIncludingThis; ++i)
1672             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
1673         set(VirtualRegister(resultOperand),
1674             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
1675         return true;
1676     }
1677     
1678     if (function->classInfo() == StringConstructor::info()) {
1679         Node* result;
1680         
1681         if (argumentCountIncludingThis <= 1)
1682             result = jsConstant(m_vm->smallStrings.emptyString());
1683         else
1684             result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
1685         
1686         if (kind == CodeForConstruct)
1687             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
1688         
1689         set(VirtualRegister(resultOperand), result);
1690         return true;
1691     }
1692     
1693     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
1694         bool result = handleTypedArrayConstructor(
1695             resultOperand, function, registerOffset, argumentCountIncludingThis,
1696             indexToTypedArrayType(typeIndex));
1697         if (result)
1698             return true;
1699     }
1700     
1701     return false;
1702 }
1703
1704 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
1705 {
1706     if (base->hasConstant()) {
1707         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
1708             addToGraph(Phantom, base);
1709             return weakJSConstant(constant);
1710         }
1711     }
1712     
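         // Properties at inline offsets live directly in the object cell, so the base itself acts
         // as the property storage; out-of-line properties require loading the butterfly first.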
1713     Node* propertyStorage;
1714     if (isInlineOffset(offset))
1715         propertyStorage = base;
1716     else
1717         propertyStorage = addToGraph(GetButterfly, base);
1718     Node* getByOffset = addToGraph(op, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
1719
1720     StorageAccessData storageAccessData;
1721     storageAccessData.offset = offset;
1722     storageAccessData.identifierNumber = identifierNumber;
1723     m_graph.m_storageAccessData.append(storageAccessData);
1724
1725     return getByOffset;
1726 }
1727
1728 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
1729 {
1730     Node* propertyStorage;
1731     if (isInlineOffset(offset))
1732         propertyStorage = base;
1733     else
1734         propertyStorage = addToGraph(GetButterfly, base);
1735     Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
1736     
1737     StorageAccessData storageAccessData;
1738     storageAccessData.offset = offset;
1739     storageAccessData.identifierNumber = identifier;
1740     m_graph.m_storageAccessData.append(storageAccessData);
1741
1742     return result;
1743 }
1744
1745 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
1746 {
1747     for (unsigned i = 0; i < vector.size(); ++i)
1748         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
1749 }
1750
1751 void ByteCodeParser::handleGetById(
1752     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
1753     const GetByIdStatus& getByIdStatus)
1754 {
1755     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
1756     
1757     if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
1758         set(VirtualRegister(destinationOperand),
1759             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
1760         return;
1761     }
1762     
1763     if (getByIdStatus.numVariants() > 1) {
1764         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
1765             || !Options::enablePolymorphicAccessInlining()) {
1766             set(VirtualRegister(destinationOperand),
1767                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
1768             return;
1769         }
1770         
1771         if (m_graph.compilation())
1772             m_graph.compilation()->noticeInlinedGetById();
1773     
1774         // 1) Emit prototype structure checks for all chains. This may not be optimal if there
1775         //    is some rarely executed case in the chain that requires a lot of checks and those
1776         //    checks are not watchpointable.
1777         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
1778             emitChecks(getByIdStatus[variantIndex].constantChecks());
1779         
1780         // 2) Emit a MultiGetByOffset
1781         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
1782         data->variants = getByIdStatus.variants();
1783         data->identifierNumber = identifierNumber;
1784         set(VirtualRegister(destinationOperand),
1785             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
1786         return;
1787     }
1788     
1789     ASSERT(getByIdStatus.numVariants() == 1);
1790     GetByIdVariant variant = getByIdStatus[0];
1791                 
1792     if (m_graph.compilation())
1793         m_graph.compilation()->noticeInlinedGetById();
1794     
1795     Node* originalBase = base;
1796                 
1797     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
1798     
1799     emitChecks(variant.constantChecks());
1800
1801     if (variant.alternateBase())
1802         base = weakJSConstant(variant.alternateBase());
1803     
1804     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
1805     // ensure that the base of the original get_by_id is kept alive until we're done with
1806     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
1807     // on something other than the base following the CheckStructure on base.
1808     if (originalBase != base)
1809         addToGraph(Phantom, originalBase);
1810     
1811     Node* loadedValue = handleGetByOffset(
1812         variant.callLinkStatus() ? SpecCellOther : prediction,
1813         base, variant.baseStructure(), identifierNumber, variant.offset(),
1814         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
1815     
1816     if (!variant.callLinkStatus()) {
1817         set(VirtualRegister(destinationOperand), loadedValue);
1818         return;
1819     }
1820     
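     // The loaded value is the property's GetterSetter cell; GetGetter extracts the getter
     // function from it so that we can emit a call to it below.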
1821     Node* getter = addToGraph(GetGetter, loadedValue);
1822     
1823     // Make a call. We don't try to get fancy with using the smallest operand number because
1824     // the stack layout phase should compress the stack anyway.
1825     
1826     unsigned numberOfParameters = 0;
1827     numberOfParameters++; // The 'this' argument.
1828     numberOfParameters++; // True return PC.
1829     
1830     // Start with a register offset that corresponds to the last in-use register.
1831     int registerOffset = virtualRegisterForLocal(
1832         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
1833     registerOffset -= numberOfParameters;
1834     registerOffset -= JSStack::CallFrameHeaderSize;
1835     
1836     // Get the alignment right.
1837     registerOffset = -WTF::roundUpToMultipleOf(
1838         stackAlignmentRegisters(),
1839         -registerOffset);
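     // (registerOffset is negative, so negating, rounding up, and negating again rounds it toward
     // more-negative values; the new call frame only ever grows to reach the required alignment.)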
1840     
1841     ensureLocals(
1842         m_inlineStackTop->remapOperand(
1843             VirtualRegister(registerOffset)).toLocal());
1844     
1845     // Issue SetLocals. This has two effects:
1846     // 1) That's how handleCall() sees the arguments.
1847     // 2) If we inline then this ensures that the arguments are flushed so that if you use
1848     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
1849     //    since we only really care about 'this' in this case. But we're not going to take that
1850     //    shortcut.
1851     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
1852     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
1853     
1854     handleCall(
1855         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
1856         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
1857 }
1858
1859 void ByteCodeParser::emitPutById(
1860     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
1861 {
1862     if (isDirect)
1863         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
1864     else
1865         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
1866 }
1867
1868 void ByteCodeParser::handlePutById(
1869     Node* base, unsigned identifierNumber, Node* value,
1870     const PutByIdStatus& putByIdStatus, bool isDirect)
1871 {
1872     if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
1873         if (!putByIdStatus.isSet())
1874             addToGraph(ForceOSRExit);
1875         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
1876         return;
1877     }
1878     
1879     if (putByIdStatus.numVariants() > 1) {
1880         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
1881             || !Options::enablePolymorphicAccessInlining()) {
1882             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
1883             return;
1884         }
1885         
1886         if (m_graph.compilation())
1887             m_graph.compilation()->noticeInlinedPutById();
1888         
1889         if (!isDirect) {
1890             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
1891                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
1892                     continue;
1893                 emitChecks(putByIdStatus[variantIndex].constantChecks());
1894             }
1895         }
1896         
1897         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
1898         data->variants = putByIdStatus.variants();
1899         data->identifierNumber = identifierNumber;
1900         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
1901         return;
1902     }
1903     
1904     ASSERT(putByIdStatus.numVariants() == 1);
1905     const PutByIdVariant& variant = putByIdStatus[0];
1906     
1907     switch (variant.kind()) {
1908     case PutByIdVariant::Replace: {
1909         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
1910         handlePutByOffset(base, identifierNumber, variant.offset(), value);
1911         if (m_graph.compilation())
1912             m_graph.compilation()->noticeInlinedPutById();
1913         return;
1914     }
1915     
1916     case PutByIdVariant::Transition: {
1917         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
1918         emitChecks(variant.constantChecks());
1919
1920         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
1921     
1922         Node* propertyStorage;
1923         Transition* transition = m_graph.m_transitions.add(
1924             variant.oldStructureForTransition(), variant.newStructure());
1925
1926         if (variant.reallocatesStorage()) {
1927
1928             // If we're growing the property storage then it must be because we're
1929             // storing into the out-of-line storage.
1930             ASSERT(!isInlineOffset(variant.offset()));
1931
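                 // If the old structure had no out-of-line capacity yet we can allocate fresh
                 // storage; otherwise we must reallocate so the existing out-of-line properties
                 // get copied into the larger butterfly.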
1932             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
1933                 propertyStorage = addToGraph(
1934                     AllocatePropertyStorage, OpInfo(transition), base);
1935             } else {
1936                 propertyStorage = addToGraph(
1937                     ReallocatePropertyStorage, OpInfo(transition),
1938                     base, addToGraph(GetButterfly, base));
1939             }
1940         } else {
1941             if (isInlineOffset(variant.offset()))
1942                 propertyStorage = base;
1943             else
1944                 propertyStorage = addToGraph(GetButterfly, base);
1945         }
1946
1947         addToGraph(PutStructure, OpInfo(transition), base);
1948
1949         addToGraph(
1950             PutByOffset,
1951             OpInfo(m_graph.m_storageAccessData.size()),
1952             propertyStorage,
1953             base,
1954             value);
1955
1956         StorageAccessData storageAccessData;
1957         storageAccessData.offset = variant.offset();
1958         storageAccessData.identifierNumber = identifierNumber;
1959         m_graph.m_storageAccessData.append(storageAccessData);
1960
1961         if (m_graph.compilation())
1962             m_graph.compilation()->noticeInlinedPutById();
1963         return;
1964     }
1965         
1966     case PutByIdVariant::Setter: {
1967         Node* originalBase = base;
1968         
1969         addToGraph(
1970             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
1971         
1972         emitChecks(variant.constantChecks());
1973         
1974         if (variant.alternateBase())
1975             base = weakJSConstant(variant.alternateBase());
1976         
1977         Node* loadedValue = handleGetByOffset(
1978             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
1979             GetGetterSetterByOffset);
1980         
1981         Node* setter = addToGraph(GetSetter, loadedValue);
1982         
1983         // Make a call. We don't try to get fancy with using the smallest operand number because
1984         // the stack layout phase should compress the stack anyway.
1985     
1986         unsigned numberOfParameters = 0;
1987         numberOfParameters++; // The 'this' argument.
1988         numberOfParameters++; // The new value.
1989         numberOfParameters++; // True return PC.
1990     
1991         // Start with a register offset that corresponds to the last in-use register.
1992         int registerOffset = virtualRegisterForLocal(
1993             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
1994         registerOffset -= numberOfParameters;
1995         registerOffset -= JSStack::CallFrameHeaderSize;
1996     
1997         // Get the alignment right.
1998         registerOffset = -WTF::roundUpToMultipleOf(
1999             stackAlignmentRegisters(),
2000             -registerOffset);
2001     
2002         ensureLocals(
2003             m_inlineStackTop->remapOperand(
2004                 VirtualRegister(registerOffset)).toLocal());
2005     
2006         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2007         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2008         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
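             // The setter runs with the original base as 'this' and the value being stored as its
             // single argument.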
2009     
2010         handleCall(
2011             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2012             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2013             *variant.callLinkStatus(), SpecOther);
2014         return;
2015     }
2016     
2017     default: {
2018         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2019         return;
2020     } }
2021 }
2022
2023 void ByteCodeParser::prepareToParseBlock()
2024 {
2025     m_constants.resize(0);
2026 }
2027
2028 Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
2029 {
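     // Walk the scope chain: optionally skip the top scope (only legal when we are not inlined),
     // then skip skipCount additional scopes.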
2030     Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
2031     if (skipTop) {
2032         ASSERT(!inlineCallFrame());
2033         localBase = addToGraph(SkipTopScope, localBase);
2034     }
2035     for (unsigned n = skipCount; n--;)
2036         localBase = addToGraph(SkipScope, localBase);
2037     return localBase;
2038 }
2039
2040 bool ByteCodeParser::parseBlock(unsigned limit)
2041 {
2042     bool shouldContinueParsing = true;
2043
2044     Interpreter* interpreter = m_vm->interpreter;
2045     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2046     unsigned blockBegin = m_currentIndex;
2047     
2048     // If we are the first basic block, introduce markers for arguments. This allows
2049     // us to track if a use of an argument may use the actual argument passed, as
2050     // opposed to using a value we set explicitly.
2051     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2052         m_graph.m_arguments.resize(m_numArguments);
2053         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2054             VariableAccessData* variable = newVariableAccessData(
2055                 virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
2056             variable->mergeStructureCheckHoistingFailed(
2057                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2058             variable->mergeCheckArrayHoistingFailed(
2059                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2060             
2061             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2062             m_graph.m_arguments[argument] = setArgument;
2063             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2064         }
2065     }
2066
2067     while (true) {
2068         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
2069             m_setLocalQueue[i].execute(this);
2070         m_setLocalQueue.resize(0);
2071         
2072         // Don't extend over jump destinations.
2073         if (m_currentIndex == limit) {
2074             // Ordinarily we want to plant a jump. But refuse to do this if the block is
2075             // empty. This is a special case for inlining, which might otherwise create
2076             // some empty blocks in some cases. When parseBlock() returns with an empty
2077             // block, it will get repurposed instead of creating a new one. Note that this
2078             // logic relies on every bytecode resulting in one or more nodes, which would
2079             // be true anyway except for op_loop_hint, which emits a Phantom to force this
2080             // to be true.
2081             if (!m_currentBlock->isEmpty())
2082                 addToGraph(Jump, OpInfo(m_currentIndex));
2083             return shouldContinueParsing;
2084         }
2085         
2086         // Switch on the current bytecode opcode.
2087         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2088         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2089         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2090         
2091         if (Options::verboseDFGByteCodeParsing())
2092             dataLog("    parsing ", currentCodeOrigin(), "\n");
2093         
2094         if (m_graph.compilation()) {
2095             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2096                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2097         }
2098         
2099         switch (opcodeID) {
2100
2101         // === Function entry opcodes ===
2102
2103         case op_enter: {
2104             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2105             // Initialize all locals to undefined.
2106             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2107                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2108             if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
2109                 set(virtualRegisterForArgument(0), undefined, ImmediateNakedSet);
2110             NEXT_OPCODE(op_enter);
2111         }
2112             
2113         case op_touch_entry:
2114             if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
2115                 addToGraph(ForceOSRExit);
2116             NEXT_OPCODE(op_touch_entry);
2117             
2118         case op_to_this: {
2119             Node* op1 = getThis();
2120             if (op1->op() != ToThis) {
2121                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2122                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2123                     || !cachedStructure
2124                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2125                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2126                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2127                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2128                     setThis(addToGraph(ToThis, op1));
2129                 } else {
2130                     addToGraph(
2131                         CheckStructure,
2132                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2133                         op1);
2134                 }
2135             }
2136             NEXT_OPCODE(op_to_this);
2137         }
2138
2139         case op_create_this: {
2140             int calleeOperand = currentInstruction[2].u.operand;
2141             Node* callee = get(VirtualRegister(calleeOperand));
2142             bool alreadyEmitted = false;
2143             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2144                 if (Structure* structure = function->allocationStructure()) {
2145                     addToGraph(AllocationProfileWatchpoint, OpInfo(m_graph.freeze(function)));
2146                     // The callee is still live up to this point.
2147                     addToGraph(Phantom, callee);
2148                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2149                     alreadyEmitted = true;
2150                 }
2151             }
2152             if (!alreadyEmitted) {
2153                 set(VirtualRegister(currentInstruction[1].u.operand),
2154                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2155             }
2156             NEXT_OPCODE(op_create_this);
2157         }
2158
2159         case op_new_object: {
2160             set(VirtualRegister(currentInstruction[1].u.operand),
2161                 addToGraph(NewObject,
2162                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2163             NEXT_OPCODE(op_new_object);
2164         }
2165             
2166         case op_new_array: {
2167             int startOperand = currentInstruction[2].u.operand;
2168             int numOperands = currentInstruction[3].u.operand;
2169             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2170             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2171                 addVarArgChild(get(VirtualRegister(operandIdx)));
2172             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2173             NEXT_OPCODE(op_new_array);
2174         }
2175             
2176         case op_new_array_with_size: {
2177             int lengthOperand = currentInstruction[2].u.operand;
2178             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2179             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2180             NEXT_OPCODE(op_new_array_with_size);
2181         }
2182             
2183         case op_new_array_buffer: {
2184             int startConstant = currentInstruction[2].u.operand;
2185             int numConstants = currentInstruction[3].u.operand;
2186             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2187             NewArrayBufferData data;
2188             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2189             data.numConstants = numConstants;
2190             data.indexingType = profile->selectIndexingType();
2191
2192             // If this statement has never executed, we'll have the wrong indexing type in the profile.
2193             for (int i = 0; i < numConstants; ++i) {
2194                 data.indexingType =
2195                     leastUpperBoundOfIndexingTypeAndValue(
2196                         data.indexingType,
2197                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2198             }
2199             
2200             m_graph.m_newArrayBufferData.append(data);
2201             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2202             NEXT_OPCODE(op_new_array_buffer);
2203         }
2204             
2205         case op_new_regexp: {
2206             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2207             NEXT_OPCODE(op_new_regexp);
2208         }
2209             
2210         case op_get_callee: {
2211             JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
2212             if (!cachedFunction 
2213                 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2214                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
2215                 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
2216             } else {
2217                 FrozenValue* frozen = m_graph.freeze(cachedFunction);
2218                 ASSERT(cachedFunction->inherits(JSFunction::info()));
2219                 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
2220                 addToGraph(CheckFunction, OpInfo(frozen), actualCallee);
2221                 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2222             }
2223             NEXT_OPCODE(op_get_callee);
2224         }
2225
2226         // === Bitwise operations ===
2227
2228         case op_bitand: {
2229             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2230             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2231             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2232             NEXT_OPCODE(op_bitand);
2233         }
2234
2235         case op_bitor: {
2236             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2237             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2238             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2239             NEXT_OPCODE(op_bitor);
2240         }
2241
2242         case op_bitxor: {
2243             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2244             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2245             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2246             NEXT_OPCODE(op_bitxor);
2247         }
2248
2249         case op_rshift: {
2250             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2251             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2252             set(VirtualRegister(currentInstruction[1].u.operand),
2253                 addToGraph(BitRShift, op1, op2));
2254             NEXT_OPCODE(op_rshift);
2255         }
2256
2257         case op_lshift: {
2258             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2259             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2260             set(VirtualRegister(currentInstruction[1].u.operand),
2261                 addToGraph(BitLShift, op1, op2));
2262             NEXT_OPCODE(op_lshift);
2263         }
2264
2265         case op_urshift: {
2266             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2267             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2268             set(VirtualRegister(currentInstruction[1].u.operand),
2269                 addToGraph(BitURShift, op1, op2));
2270             NEXT_OPCODE(op_urshift);
2271         }
2272             
2273         case op_unsigned: {
2274             set(VirtualRegister(currentInstruction[1].u.operand),
2275                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2276             NEXT_OPCODE(op_unsigned);
2277         }
2278
2279         // === Increment/Decrement opcodes ===
2280
2281         case op_inc: {
2282             int srcDst = currentInstruction[1].u.operand;
2283             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2284             Node* op = get(srcDstVirtualRegister);
2285             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2286             NEXT_OPCODE(op_inc);
2287         }
2288
2289         case op_dec: {
2290             int srcDst = currentInstruction[1].u.operand;
2291             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2292             Node* op = get(srcDstVirtualRegister);
2293             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2294             NEXT_OPCODE(op_dec);
2295         }
2296
2297         // === Arithmetic operations ===
2298
2299         case op_add: {
2300             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2301             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2302             if (op1->hasNumberResult() && op2->hasNumberResult())
2303                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2304             else
2305                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2306             NEXT_OPCODE(op_add);
2307         }
2308
2309         case op_sub: {
2310             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2311             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2312             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2313             NEXT_OPCODE(op_sub);
2314         }
2315
2316         case op_negate: {
2317             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2318             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2319             NEXT_OPCODE(op_negate);
2320         }
2321
2322         case op_mul: {
2323             // Multiply requires that the inputs are not truncated, unfortunately.
2324             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2325             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2326             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2327             NEXT_OPCODE(op_mul);
2328         }
2329
2330         case op_mod: {
2331             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2332             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2333             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2334             NEXT_OPCODE(op_mod);
2335         }
2336
2337         case op_div: {
2338             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2339             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2340             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2341             NEXT_OPCODE(op_div);
2342         }
2343
2344         // === Misc operations ===
2345
2346         case op_debug:
2347             addToGraph(Breakpoint);
2348             NEXT_OPCODE(op_debug);
2349
2350         case op_profile_will_call: {
2351             addToGraph(ProfileWillCall);
2352             NEXT_OPCODE(op_profile_will_call);
2353         }
2354
2355         case op_profile_did_call: {
2356             addToGraph(ProfileDidCall);
2357             NEXT_OPCODE(op_profile_did_call);
2358         }
2359
2360         case op_mov: {
2361             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2362             set(VirtualRegister(currentInstruction[1].u.operand), op);
2363             NEXT_OPCODE(op_mov);
2364         }
2365             
2366         case op_captured_mov: {
2367             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
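                 // A store to a captured variable must notify the variable's watchpoint set, unless
                 // that set is already invalidated and nobody can be watching it anymore.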
2368             if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
2369                 if (set->state() != IsInvalidated)
2370                     addToGraph(NotifyWrite, OpInfo(set), op);
2371             }
2372             set(VirtualRegister(currentInstruction[1].u.operand), op);
2373             NEXT_OPCODE(op_captured_mov);
2374         }
2375
2376         case op_check_has_instance:
2377             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2378             NEXT_OPCODE(op_check_has_instance);
2379
2380         case op_instanceof: {
2381             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2382             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2383             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2384             NEXT_OPCODE(op_instanceof);
2385         }
2386             
2387         case op_is_undefined: {
2388             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2389             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2390             NEXT_OPCODE(op_is_undefined);
2391         }
2392
2393         case op_is_boolean: {
2394             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2395             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2396             NEXT_OPCODE(op_is_boolean);
2397         }
2398
2399         case op_is_number: {
2400             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2401             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2402             NEXT_OPCODE(op_is_number);
2403         }
2404
2405         case op_is_string: {
2406             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2407             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2408             NEXT_OPCODE(op_is_string);
2409         }
2410
2411         case op_is_object: {
2412             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2413             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2414             NEXT_OPCODE(op_is_object);
2415         }
2416
2417         case op_is_function: {
2418             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2419             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2420             NEXT_OPCODE(op_is_function);
2421         }
2422
2423         case op_not: {
2424             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2425             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2426             NEXT_OPCODE(op_not);
2427         }
2428             
2429         case op_to_primitive: {
2430             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2431             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2432             NEXT_OPCODE(op_to_primitive);
2433         }
2434             
2435         case op_strcat: {
2436             int startOperand = currentInstruction[2].u.operand;
2437             int numOperands = currentInstruction[3].u.operand;
2438 #if CPU(X86)
2439             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2440             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2441             const unsigned maxRopeArguments = 2;
2442 #else
2443             const unsigned maxRopeArguments = 3;
2444 #endif
2445             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2446             for (int i = 0; i < numOperands; i++)
2447                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2448
2449             for (int i = 0; i < numOperands; i++)
2450                 addToGraph(Phantom, toStringNodes[i]);
2451
2452             Node* operands[AdjacencyList::Size];
2453             unsigned indexInOperands = 0;
2454             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2455                 operands[i] = 0;
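                 // Fold left to right: whenever the operand buffer fills up, collapse what we have
                 // into a single MakeRope and let that rope become the first operand of the next one.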
2456             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2457                 if (indexInOperands == maxRopeArguments) {
2458                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2459                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2460                         operands[i] = 0;
2461                     indexInOperands = 1;
2462                 }
2463                 
2464                 ASSERT(indexInOperands < AdjacencyList::Size);
2465                 ASSERT(indexInOperands < maxRopeArguments);
2466                 operands[indexInOperands++] = toStringNodes[operandIdx];
2467             }
2468             set(VirtualRegister(currentInstruction[1].u.operand),
2469                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2470             NEXT_OPCODE(op_strcat);
2471         }
2472
2473         case op_less: {
2474             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2475             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2476             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2477             NEXT_OPCODE(op_less);
2478         }
2479
2480         case op_lesseq: {
2481             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2482             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2483             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
2484             NEXT_OPCODE(op_lesseq);
2485         }
2486
2487         case op_greater: {
2488             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2489             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2490             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
2491             NEXT_OPCODE(op_greater);
2492         }
2493
2494         case op_greatereq: {
2495             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2496             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2497             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
2498             NEXT_OPCODE(op_greatereq);
2499         }
2500
2501         case op_eq: {
2502             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2503             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2504             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
2505             NEXT_OPCODE(op_eq);
2506         }
2507
2508         case op_eq_null: {
2509             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2510             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
2511             NEXT_OPCODE(op_eq_null);
2512         }
2513
2514         case op_stricteq: {
2515             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2516             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2517             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
2518             NEXT_OPCODE(op_stricteq);
2519         }
2520
2521         case op_neq: {
2522             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2523             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2524             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2525             NEXT_OPCODE(op_neq);
2526         }
2527
2528         case op_neq_null: {
2529             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2530             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
2531             NEXT_OPCODE(op_neq_null);
2532         }
2533
2534         case op_nstricteq: {
2535             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2536             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2537             Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
2539             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
2540             NEXT_OPCODE(op_nstricteq);
2541         }
2542
2543         // === Property access operations ===
2544
2545         case op_get_by_val: {
2546             SpeculatedType prediction = getPredictionWithoutOSRExit();
2547             
2548             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2549             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
2550             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
2551             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2552             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
2553
2554             NEXT_OPCODE(op_get_by_val);
2555         }
2556
2557         case op_put_by_val_direct:
2558         case op_put_by_val: {
2559             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2560
2561             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
2562             
2563             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
2564             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2565             
2566             addVarArgChild(base);
2567             addVarArgChild(property);
2568             addVarArgChild(value);
2569             addVarArgChild(0); // Leave room for property storage.
2570             addVarArgChild(0); // Leave room for length.
2571             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2572
2573             NEXT_OPCODE(op_put_by_val);
2574         }
2575             
2576         case op_get_by_id:
2577         case op_get_by_id_out_of_line:
2578         case op_get_array_length: {
2579             SpeculatedType prediction = getPrediction();
2580             
2581             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2582             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2583             
2584             StringImpl* uid = m_graph.identifiers()[identifierNumber];
2585             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2586                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2587                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2588                 currentCodeOrigin(), uid);
2589             
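                 // handleGetById decides, based on the profiled status, whether the access can be
                 // inlined as a structure-checked load or has to stay a generic GetById node.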
2590             handleGetById(
2591                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2592
2593             NEXT_OPCODE(op_get_by_id);
2594         }
2595         case op_put_by_id:
2596         case op_put_by_id_out_of_line:
2597         case op_put_by_id_transition_direct:
2598         case op_put_by_id_transition_normal:
2599         case op_put_by_id_transition_direct_out_of_line:
2600         case op_put_by_id_transition_normal_out_of_line: {
2601             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2602             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2603             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2604             bool direct = currentInstruction[8].u.operand;
2605
2606             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2607                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2608                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2609                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
2610             
2611             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
2612             NEXT_OPCODE(op_put_by_id);
2613         }
2614
2615         case op_init_global_const_nop: {
2616             NEXT_OPCODE(op_init_global_const_nop);
2617         }
2618
2619         case op_init_global_const: {
2620             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2621             addToGraph(
2622                 PutGlobalVar,
2623                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2624                 value);
2625             NEXT_OPCODE(op_init_global_const);
2626         }
2627
2628         // === Block terminators. ===
2629
2630         case op_jmp: {
2631             int relativeOffset = currentInstruction[1].u.operand;
2632             if (relativeOffset <= 0)
2633                 flushForTerminal();
2634             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2635             LAST_OPCODE(op_jmp);
2636         }
2637
2638         case op_jtrue: {
2639             unsigned relativeOffset = currentInstruction[2].u.operand;
2640             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
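                 // branchData(taken, notTaken): when the condition is true we branch to the jump
                 // target, otherwise we fall through. op_jfalse below is identical except that the
                 // two destinations are swapped.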
2641             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
2642             LAST_OPCODE(op_jtrue);
2643         }
2644
2645         case op_jfalse: {
2646             unsigned relativeOffset = currentInstruction[2].u.operand;
2647             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2648             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
2649             LAST_OPCODE(op_jfalse);
2650         }
2651
2652         case op_jeq_null: {
2653             unsigned relativeOffset = currentInstruction[2].u.operand;
2654             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2655             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2656             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
2657             LAST_OPCODE(op_jeq_null);
2658         }
2659
2660         case op_jneq_null: {
2661             unsigned relativeOffset = currentInstruction[2].u.operand;
2662             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2663             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2664             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
2665             LAST_OPCODE(op_jneq_null);
2666         }
2667
2668         case op_jless: {
2669             unsigned relativeOffset = currentInstruction[3].u.operand;
2670             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2671             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2672             Node* condition = addToGraph(CompareLess, op1, op2);
2673             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
2674             LAST_OPCODE(op_jless);
2675         }
2676
2677         case op_jlesseq: {
2678             unsigned relativeOffset = currentInstruction[3].u.operand;
2679             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2680             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2681             Node* condition = addToGraph(CompareLessEq, op1, op2);
2682             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
2683             LAST_OPCODE(op_jlesseq);
2684         }
2685
2686         case op_jgreater: {
2687             unsigned relativeOffset = currentInstruction[3].u.operand;
2688             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2689             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2690             Node* condition = addToGraph(CompareGreater, op1, op2);
2691             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
2692             LAST_OPCODE(op_jgreater);
2693         }
2694
2695         case op_jgreatereq: {
2696             unsigned relativeOffset = currentInstruction[3].u.operand;
2697             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2698             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2699             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2700             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
2701             LAST_OPCODE(op_jgreatereq);
2702         }
2703
2704         case op_jnless: {
2705             unsigned relativeOffset = currentInstruction[3].u.operand;
2706             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2707             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2708             Node* condition = addToGraph(CompareLess, op1, op2);
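                 // The negated jumps (op_jnless through op_jngreatereq) still build the positive
                 // comparison; only the taken/notTaken destinations are swapped, so the branch is
                 // taken when the comparison fails.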
2709             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
2710             LAST_OPCODE(op_jnless);
2711         }
2712
2713         case op_jnlesseq: {
2714             unsigned relativeOffset = currentInstruction[3].u.operand;
2715             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2716             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2717             Node* condition = addToGraph(CompareLessEq, op1, op2);
2718             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
2719             LAST_OPCODE(op_jnlesseq);
2720         }
2721
2722         case op_jngreater: {
2723             unsigned relativeOffset = currentInstruction[3].u.operand;
2724             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2725             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2726             Node* condition = addToGraph(CompareGreater, op1, op2);
2727             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
2728             LAST_OPCODE(op_jngreater);
2729         }
2730
2731         case op_jngreatereq: {
2732             unsigned relativeOffset = currentInstruction[3].u.operand;
2733             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2734             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2735             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2736             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
2737             LAST_OPCODE(op_jngreatereq);
2738         }
2739             
2740         case op_switch_imm: {
2741             SwitchData& data = *m_graph.m_switchData.add();
2742             data.kind = SwitchImm;
2743             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
2744             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2745             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
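                 // A branch offset of zero means the table has no case for that value, and cases
                 // that would just reach the fall-through are skipped too, so the Switch node only
                 // carries genuinely distinct targets.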
2746             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
2747                 if (!table.branchOffsets[i])
2748                     continue;
2749                 unsigned target = m_currentIndex + table.branchOffsets[i];
2750                 if (target == data.fallThrough.bytecodeIndex())
2751                     continue;
2752                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
2753             }
2754             flushIfTerminal(data);
2755             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
2756             LAST_OPCODE(op_switch_imm);
2757         }
2758             
2759         case op_switch_char: {
2760             SwitchData& data = *m_graph.m_switchData.add();
2761             data.kind = SwitchChar;
2762             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
2763             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2764             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
2765             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
2766                 if (!table.branchOffsets[i])
2767                     continue;
2768                 unsigned target = m_currentIndex + table.branchOffsets[i];
2769                 if (target == data.fallThrough.bytecodeIndex())
2770                     continue;
2771                 data.cases.append(
2772                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
2773             }
2774             flushIfTerminal(data);
2775             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
2776             LAST_OPCODE(op_switch_char);
2777         }
2778
2779         case op_switch_string: {
2780             SwitchData& data = *m_graph.m_switchData.add();
2781             data.kind = SwitchString;
2782             data.switchTableIndex = currentInstruction[1].u.operand;
2783             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2784             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
2785             StringJumpTable::StringOffsetTable::iterator iter;
2786             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
2787             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
2788                 unsigned target = m_currentIndex + iter->value.branchOffset;
2789                 if (target == data.fallThrough.bytecodeIndex())
2790                     continue;
2791                 data.cases.append(
2792                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
2793             }
2794             flushIfTerminal(data);
2795             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
2796             LAST_OPCODE(op_switch_string);
2797         }
2798
2799         case op_ret:
2800             flushForReturn();
2801             if (inlineCallFrame()) {
2802                 if (m_inlineStackTop->m_returnValue.isValid())
2803                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
2804                 m_inlineStackTop->m_didReturn = true;
2805                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2806                     // If we're returning from the first block, then we're done parsing.
2807                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
2808                     shouldContinueParsing = false;
2809                     LAST_OPCODE(op_ret);
2810                 } else {
2811                     // If inlining created blocks, and we're doing a return, then we need some
2812                     // special linking.
2813                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
2814                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2815                 }
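                     // A return that is not the last instruction of the inlined bytecode (or that
                     // follows an earlier early return) needs a Jump to the continuation; mark the
                     // block so it gets linked as an early return once parsing is done.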
2816                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2817                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2818                     addToGraph(Jump, OpInfo(0));
2819                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2820                     m_inlineStackTop->m_didEarlyReturn = true;
2821                 }
2822                 LAST_OPCODE(op_ret);
2823             }
2824             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
2825             LAST_OPCODE(op_ret);
2826             
2827         case op_end:
2828             flushForReturn();
2829             ASSERT(!inlineCallFrame());
2830             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
2831             LAST_OPCODE(op_end);
2832
2833         case op_throw:
2834             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
2835             flushForTerminal();
2836             addToGraph(Unreachable);
2837             LAST_OPCODE(op_throw);
2838             
2839         case op_throw_static_error:
2840             addToGraph(ThrowReferenceError);
2841             flushForTerminal();
2842             addToGraph(Unreachable);
2843             LAST_OPCODE(op_throw_static_error);
2844             
2845         case op_call:
2846             handleCall(currentInstruction, Call, CodeForCall);
2847             NEXT_OPCODE(op_call);
2848             
2849         case op_construct:
2850             handleCall(currentInstruction, Construct, CodeForConstruct);
2851             NEXT_OPCODE(op_construct);
2852             
2853         case op_call_varargs: {
2854             int result = currentInstruction[1].u.operand;
2855             int callee = currentInstruction[2].u.operand;
2856             int thisReg = currentInstruction[3].u.operand;
2857             int arguments = currentInstruction[4].u.operand;
2858             int firstFreeReg = currentInstruction[5].u.operand;
2859             
2860             ASSERT(inlineCallFrame());
2861             ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
2862             ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
2863
2864             addToGraph(CheckArgumentsNotCreated);
2865
2866             unsigned argCount = inlineCallFrame()->arguments.size();
2867             
2868             // Let's compute the register offset. We start with the last used register, and
2869             // then adjust for the things we want in the call frame.
2870             int registerOffset = firstFreeReg + 1;
2871             registerOffset -= argCount; // We will be passing some arguments.
2872             registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
2873             
2874             // Get the alignment right.
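                 // registerOffset is negative at this point (the callee frame sits below the
                 // current frame), so negating, rounding up, and negating again snaps the frame
                 // start down to the next stack-alignment boundary.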
2875             registerOffset = -WTF::roundUpToMultipleOf(
2876                 stackAlignmentRegisters(),
2877                 -registerOffset);
2878
2879             ensureLocals(
2880                 m_inlineStackTop->remapOperand(
2881                     VirtualRegister(registerOffset)).toLocal());
2882             
2883             // The bytecode wouldn't have set up the arguments. But we'll do it and make it
2884             // look like the bytecode had done it.
2885             int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2886             set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
2887             for (unsigned argument = 1; argument < argCount; ++argument)
2888                 set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
2889             
2890             handleCall(
2891                 result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
2892                 callee, argCount, registerOffset);
2893             NEXT_OPCODE(op_call_varargs);
2894         }
2895             
2896         case op_jneq_ptr:
2897             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2898             // support simmer for a while before making it more general, since it's
2899             // already gnarly enough as it is.
2900             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
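             // CheckFunction OSR-exits if the value is not the expected cell, so the not-equal
             // target of jneq_ptr never has to be compiled here; we only emit the fall-through jump.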
2901             addToGraph(
2902                 CheckFunction,
2903                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
2904                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
2905                 get(VirtualRegister(currentInstruction[1].u.operand)));
2906             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
2907             LAST_OPCODE(op_jneq_ptr);
2908
2909         case op_resolve_scope: {
2910             int dst = currentInstruction[1].u.operand;
2911             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
2912             unsigned depth = currentInstruction[4].u.operand;
2913
2914             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
2915             if (needsVarInjectionChecks(resolveType))
2916                 addToGraph(VarInjectionWatchpoint);
2917
2918             switch (resolveType) {
2919             case GlobalProperty:
2920             case GlobalVar:
2921             case GlobalPropertyWithVarInjectionChecks:
2922             case GlobalVarWithVarInjectionChecks:
2923                 set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
2924                 break;
2925             case ClosureVar:
2926             case ClosureVarWithVarInjectionChecks: {
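                     // If the profiled activation's function has never been re-entered, watchpoint
                     // that fact and treat the activation itself as a constant; otherwise fall back
                     // to getScope(), which resolves the scope chain at the given depth.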
2927                 JSActivation* activation = currentInstruction[5].u.activation.get();
2928                 if (activation
2929                     && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
2930                     addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
2931                     set(VirtualRegister(dst), weakJSConstant(activation));
2932                     break;
2933                 }
2934                 set(VirtualRegister(dst),
2935                     getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
2936                 break;
2937             }
2938             case Dynamic:
2939                 RELEASE_ASSERT_NOT_REACHED();
2940                 break;
2941             }
2942             NEXT_OPCODE(op_resolve_scope);
2943         }
2944
2945         case op_get_from_scope: {
2946             int dst = currentInstruction[1].u.operand;
2947             int scope = currentInstruction[2].u.operand;
2948             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2949             StringImpl* uid = m_graph.identifiers()[identifierNumber];
2950             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
2951
2952             Structure* structure = 0;
2953             WatchpointSet* watchpoints = 0;
2954             uintptr_t operand;
2955             {
2956                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
2957                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
2958                     watchpoints = currentInstruction[5].u.watchpointSet;
2959                 else
2960                     structure = currentInstruction[5].u.structure.get();
2961                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
2962             }
2963
2964             UNUSED_PARAM(watchpoints); // We will use this in the future. For now we read it only to document that index 5 holds the watchpoint set in GlobalVar mode.
2965
2966             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2967
2968             switch (resolveType) {
2969             case GlobalProperty:
2970             case GlobalPropertyWithVarInjectionChecks: {
2971                 SpeculatedType prediction = getPrediction();
2972                 GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
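                     // Fast path: a single Simple variant with one structure lets us load straight
                     // off the global object behind a structure check; anything else goes through
                     // GetByIdFlush.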
2973                 if (status.state() != GetByIdStatus::Simple
2974                     || status.numVariants() != 1
2975                     || status[0].structureSet().size() != 1) {
2976                     set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
2977                     break;
2978                 }
2979                 Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
2980                 addToGraph(Phantom, get(VirtualRegister(scope)));
2981                 set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
2982                 break;
2983             }
2984             case GlobalVar:
2985             case GlobalVarWithVarInjectionChecks: {
2986                 addToGraph(Phantom, get(VirtualRegister(scope)));
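                     // If the variable's watchpoint set carries an inferred value, plant a
                     // watchpoint and fold the load to that constant; otherwise emit a plain
                     // GetGlobalVar with the profiled prediction.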
2987                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
2988                 VariableWatchpointSet* watchpointSet = entry.watchpointSet();
2989                 JSValue inferredValue =
2990                     watchpointSet ? watchpointSet->inferredValue() : JSValue();
2991                 if (!inferredValue) {
2992                     SpeculatedType prediction = getPrediction();
2993                     set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
2994                     break;
2995                 }
2996                 
2997                 addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
2998                 set(VirtualRegister(dst), weakJSConstant(inferredValue));
2999                 break;
3000             }
3001             case ClosureVar:
3002             case ClosureVarWithVarInjectionChecks: {
3003                 Node* scopeNode = get(VirtualRegister(scope));
3004                 if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
3005                     SymbolTable* symbolTable = activation->symbolTable();
3006                     ConcurrentJITLocker locker(symbolTable->m_lock);
3007                     SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
3008                     ASSERT(iter != symbolTable->end(locker));
3009                     VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
3010                     if (watchpointSet) {
3011                         if (JSValue value = watchpointSet->inferredValue()) {
3012                             addToGraph(Phantom, scopeNode);
3013                             addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
3014                             set(VirtualRegister(dst), weakJSConstant(value));
3015                             break;
3016                         }
3017                     }
3018                 }
3019                 SpeculatedType prediction = getPrediction();
3020                 set(VirtualRegister(dst),
3021                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), 
3022                         addToGraph(GetClosureRegisters, scopeNode)));
3023                 break;
3024             }
3025             case Dynamic:
3026                 RELEASE_ASSERT_NOT_REACHED();
3027                 break;
3028             }
3029             NEXT_OPCODE(op_get_from_scope);
3030         }
3031
3032         case op_put_to_scope: {
3033             unsigned scope = currentInstruction[1].u.operand;
3034             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3035             unsigned value = currentInstruction[3].u.operand;
3036             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3037             StringImpl* uid = m_graph.identifiers()[identifierNumber];
3038
3039             Structure* structure = 0;
3040             VariableWatchpointSet* watchpoints = 0;
3041             uintptr_t operand;
3042             {
3043                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3044                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
3045                     watchpoints = currentInstruction[5].u.watchpointSet;
3046                 else
3047                     structure = currentInstruction[5].u.structure.get();
3048                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3049             }
3050
3051             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3052
3053             switch (resolveType) {
3054             case GlobalProperty:
3055             case GlobalPropertyWithVarInjectionChecks: {
3056                 PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
3057                 if (status.numVariants() != 1
3058                     || status[0].kind() != PutByIdVariant::Replace
3059                     || status[0].structure().size() != 1) {
3060                     addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
3061                     break;
3062                 }
3063                 ASSERT(status[0].structure().onlyStructure() == structure);
3064                 Node* base = cellConstantWithStructureCheck(globalObject, structure);
3065                 addToGraph(Phantom, get(VirtualRegister(scope)));
3066                 handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
3067                 // Keep scope alive until after put.
3068                 addToGraph(Phantom, get(VirtualRegister(scope)));
3069                 break;
3070             }
3071             case GlobalVar:
3072             case GlobalVarWithVarInjectionChecks: {
3073                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
3074                 ASSERT(watchpoints == entry.watchpointSet());
3075                 Node* valueNode = get(VirtualRegister(value));
3076                 addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
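                     // Unless the set is already invalidated, the store has to notify the
                     // watchpoint set so that code specialized on the old inferred value gets
                     // invalidated.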
3077                 if (watchpoints->state() != IsInvalidated)
3078                     addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
3079                 // Keep scope alive until after put.
3080                 addToGraph(Phantom, get(VirtualRegister(scope)));
3081                 break;
3082             }
3083             case ClosureVar:
3084             case ClosureVarWithVarInjectionChecks: {
3085                 Node* scopeNode = get(VirtualRegister(scope));
3086                 Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
3087                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
3088                 break;
3089             }
3090             case Dynamic:
3091                 RELEASE_ASSERT_NOT_REACHED();
3092                 break;
3093             }
3094             NEXT_OPCODE(op_put_to_scope);
3095         }
3096
3097         case op_loop_hint: {
3098             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
3099             // OSR can only happen at basic block boundaries. Assert that these two statements
3100             // are compatible.
3101             RELEASE_ASSERT(m_currentIndex == blockBegin);
3102             
3103             // We never do OSR into an inlined code block. That could not happen, since OSR
3104             // looks up the code block that is the replacement for the baseline JIT code
3105             // block. Hence, machine code block = true code block = not inline code block.
3106             if (!m_inlineStackTop->m_caller)
3107                 m_currentBlock->isOSRTarget = true;
3108
3109             addToGraph(LoopHint);
3110             
3111             if (m_vm->watchdog && m_vm->watchdog->isEnabled())
3112                 addToGraph(CheckWatchdogTimer);
3113             
3114             NEXT_OPCODE(op_loop_hint);
3115         }
3116             
3117         case op_init_lazy_reg: {
3118             set(VirtualRegister(currentInstruction[1].u.operand), jsConstant(JSValue()));
3119             ASSERT(operandIsLocal(currentInstruction[1].u.operand));
3120             m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());