Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
1  /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "CallLinkStatus.h"
33 #include "CodeBlock.h"
34 #include "CodeBlockWithJITType.h"
35 #include "DFGArrayMode.h"
36 #include "DFGCapabilities.h"
37 #include "DFGJITCode.h"
38 #include "GetByIdStatus.h"
39 #include "Heap.h"
40 #include "JSActivation.h"
41 #include "JSCInlines.h"
42 #include "PreciseJumpTargets.h"
43 #include "PutByIdStatus.h"
44 #include "StackAlignment.h"
45 #include "StringConstructor.h"
46 #include <wtf/CommaPrinter.h>
47 #include <wtf/HashMap.h>
48 #include <wtf/MathExtras.h>
49 #include <wtf/StdLibExtras.h>
50
51 namespace JSC { namespace DFG {
52
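// Hash-map key pairing a CodeBlock with a constant-buffer index. It backs the
// m_constantBufferCache map declared below, which (presumably) lets the parser reuse a
// constant buffer already copied into the machine code block when the same
// (CodeBlock, index) pair shows up again while inlining.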
53 class ConstantBufferKey {
54 public:
55     ConstantBufferKey()
56         : m_codeBlock(0)
57         , m_index(0)
58     {
59     }
60     
61     ConstantBufferKey(WTF::HashTableDeletedValueType)
62         : m_codeBlock(0)
63         , m_index(1)
64     {
65     }
66     
67     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
68         : m_codeBlock(codeBlock)
69         , m_index(index)
70     {
71     }
72     
73     bool operator==(const ConstantBufferKey& other) const
74     {
75         return m_codeBlock == other.m_codeBlock
76             && m_index == other.m_index;
77     }
78     
79     unsigned hash() const
80     {
81         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
82     }
83     
84     bool isHashTableDeletedValue() const
85     {
86         return !m_codeBlock && m_index;
87     }
88     
89     CodeBlock* codeBlock() const { return m_codeBlock; }
90     unsigned index() const { return m_index; }
91     
92 private:
93     CodeBlock* m_codeBlock;
94     unsigned m_index;
95 };
96
97 struct ConstantBufferKeyHash {
98     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
99     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
100     {
101         return a == b;
102     }
103     
104     static const bool safeToCompareToEmptyOrDeleted = true;
105 };
106
107 } } // namespace JSC::DFG
108
109 namespace WTF {
110
111 template<typename T> struct DefaultHash;
112 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
113     typedef JSC::DFG::ConstantBufferKeyHash Hash;
114 };
115
116 template<typename T> struct HashTraits;
117 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
118
119 } // namespace WTF
120
121 namespace JSC { namespace DFG {
122
123 // === ByteCodeParser ===
124 //
125 // This class is used to compile the dataflow graph from a CodeBlock.
126 class ByteCodeParser {
127 public:
128     ByteCodeParser(Graph& graph)
129         : m_vm(&graph.m_vm)
130         , m_codeBlock(graph.m_codeBlock)
131         , m_profiledBlock(graph.m_profiledBlock)
132         , m_graph(graph)
133         , m_currentBlock(0)
134         , m_currentIndex(0)
135         , m_constantUndefined(graph.freeze(jsUndefined()))
136         , m_constantNull(graph.freeze(jsNull()))
137         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
138         , m_constantOne(graph.freeze(jsNumber(1)))
139         , m_numArguments(m_codeBlock->numParameters())
140         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
141         , m_parameterSlots(0)
142         , m_numPassedVarArgs(0)
143         , m_inlineStackTop(0)
144         , m_haveBuiltOperandMaps(false)
145         , m_currentInstruction(0)
146     {
147         ASSERT(m_profiledBlock);
148     }
149     
150     // Parse a full CodeBlock of bytecode.
151     bool parse();
152     
153 private:
154     struct InlineStackEntry;
155
156     // Just parse from m_currentIndex to the end of the current CodeBlock.
157     void parseCodeBlock();
158     
159     void ensureLocals(unsigned newNumLocals)
160     {
161         if (newNumLocals <= m_numLocals)
162             return;
163         m_numLocals = newNumLocals;
164         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
165             m_graph.block(i)->ensureLocals(newNumLocals);
166     }
167
168     // Helper for min and max.
169     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
170     
171     // Handle calls. This resolves issues surrounding inlining and intrinsics.
172     void handleCall(
173         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
174         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
175         SpeculatedType prediction);
176     void handleCall(
177         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
178         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
179     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
180     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
181     void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
182     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
183     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
184     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind);
185     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
186     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
187     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
188     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
189     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
190     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
191     void handleGetById(
192         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
193         const GetByIdStatus&);
194     void emitPutById(
195         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
196     void handlePutById(
197         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
198         bool isDirect);
199     void emitChecks(const ConstantStructureCheckVector&);
200
201     Node* getScope(bool skipTop, unsigned skipCount);
202     
203     // Prepare to parse a block.
204     void prepareToParseBlock();
205     // Parse a single basic block of bytecode instructions.
206     bool parseBlock(unsigned limit);
207     // Link block successors.
208     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
209     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
210     
211     VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
212     {
213         ASSERT(!operand.isConstant());
214         
215         m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
216         return &m_graph.m_variableAccessData.last();
217     }
218     
219     // Get/Set the operands/result of a bytecode instruction.
220     Node* getDirect(VirtualRegister operand)
221     {
222         ASSERT(!operand.isConstant());
223
224         // Is this an argument?
225         if (operand.isArgument())
226             return getArgument(operand);
227
228         // Must be a local.
229         return getLocal(operand);
230     }
231
232     Node* get(VirtualRegister operand)
233     {
234         if (operand.isConstant()) {
235             unsigned constantIndex = operand.toConstantIndex();
236             unsigned oldSize = m_constants.size();
237             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
238                 JSValue value = m_inlineStackTop->m_codeBlock->getConstant(operand.offset());
239                 if (constantIndex >= oldSize) {
240                     m_constants.grow(constantIndex + 1);
241                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
242                         m_constants[i] = nullptr;
243                 }
244                 m_constants[constantIndex] =
245                     addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
246             }
247             ASSERT(m_constants[constantIndex]);
248             return m_constants[constantIndex];
249         }
250         
251         if (inlineCallFrame()) {
252             if (!inlineCallFrame()->isClosureCall) {
253                 JSFunction* callee = inlineCallFrame()->calleeConstant();
254                 if (operand.offset() == JSStack::Callee)
255                     return weakJSConstant(callee);
256                 if (operand.offset() == JSStack::ScopeChain)
257                     return weakJSConstant(callee->scope());
258             }
259         } else if (operand.offset() == JSStack::Callee)
260             return addToGraph(GetCallee);
261         else if (operand.offset() == JSStack::ScopeChain)
262             return addToGraph(GetMyScope);
263         
264         return getDirect(m_inlineStackTop->remapOperand(operand));
265     }
266     
267     enum SetMode {
268         // A normal set which follows a two-phase commit that spans code origins. During
269         // the current code origin it issues a MovHint, and at the start of the next
270         // code origin there will be a SetLocal. If the local needs flushing, the second
271         // SetLocal will be preceded with a Flush.
272         NormalSet,
273         
274         // A set where the SetLocal happens immediately and there is still a Flush. This
275         // is relevant when assigning to a local in tricky situations for the delayed
276         // SetLocal logic but where we know that we have not performed any side effects
277         // within this code origin. This is a safe replacement for NormalSet anytime we
278         // know that we have not yet performed side effects in this code origin.
279         ImmediateSetWithFlush,
280         
281         // A set where the SetLocal happens immediately and we do not Flush it even if
282         // this is a local that is marked as needing it. This is relevant when
283         // initializing locals at the top of a function.
284         ImmediateNakedSet
285     };
286     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
287     {
288         addToGraph(MovHint, OpInfo(operand.offset()), value);
289         
290         DelayedSetLocal delayed = DelayedSetLocal(operand, value);
291         
292         if (setMode == NormalSet) {
293             m_setLocalQueue.append(delayed);
294             return 0;
295         }
296         
297         return delayed.execute(this, setMode);
298     }
299
300     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
301     {
302         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
303     }
304     
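    // Attach the lazy-operand value profile's prediction for (bytecode index, local)
    // to the VariableAccessData of a freshly created GetLocal.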
305     Node* injectLazyOperandSpeculation(Node* node)
306     {
307         ASSERT(node->op() == GetLocal);
308         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
309         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
310         LazyOperandValueProfileKey key(m_currentIndex, node->local());
311         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
312         node->variableAccessData()->predict(prediction);
313         return node;
314     }
315
316     // Used in implementing get/set, above, where the operand is a local variable.
317     Node* getLocal(VirtualRegister operand)
318     {
319         unsigned local = operand.toLocal();
320
321         if (local < m_localWatchpoints.size()) {
322             if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
323                 if (JSValue value = set->inferredValue()) {
324                     addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
325                     addToGraph(VariableWatchpoint, OpInfo(set));
326                     return weakJSConstant(value);
327                 }
328             }
329         }
330
331         Node* node = m_currentBlock->variablesAtTail.local(local);
332         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
333         
334         // This has two goals: 1) link together variable access datas, and 2)
335         // try to avoid creating redundant GetLocals. (1) is required for
336         // correctness - no other phase will ensure that block-local variable
337         // access data unification is done correctly. (2) is purely opportunistic
338         // and is meant as a compile-time optimization only.
339         
340         VariableAccessData* variable;
341         
342         if (node) {
343             variable = node->variableAccessData();
344             variable->mergeIsCaptured(isCaptured);
345             
346             if (!isCaptured) {
347                 switch (node->op()) {
348                 case GetLocal:
349                     return node;
350                 case SetLocal:
351                     return node->child1().node();
352                 default:
353                     break;
354                 }
355             }
356         } else
357             variable = newVariableAccessData(operand, isCaptured);
358         
359         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
360         m_currentBlock->variablesAtTail.local(local) = node;
361         return node;
362     }
363
364     Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
365     {
366         unsigned local = operand.toLocal();
367         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
368         
369         if (setMode != ImmediateNakedSet) {
370             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
371             if (isCaptured || argumentPosition)
372                 flushDirect(operand, argumentPosition);
373         }
374
375         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
376         variableAccessData->mergeStructureCheckHoistingFailed(
377             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
378         variableAccessData->mergeCheckArrayHoistingFailed(
379             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
380         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
381         m_currentBlock->variablesAtTail.local(local) = node;
382         return node;
383     }
384
385     // Used in implementing get/set, above, where the operand is an argument.
386     Node* getArgument(VirtualRegister operand)
387     {
388         unsigned argument = operand.toArgument();
389         ASSERT(argument < m_numArguments);
390         
391         Node* node = m_currentBlock->variablesAtTail.argument(argument);
392         bool isCaptured = m_codeBlock->isCaptured(operand);
393
394         VariableAccessData* variable;
395         
396         if (node) {
397             variable = node->variableAccessData();
398             variable->mergeIsCaptured(isCaptured);
399             
400             switch (node->op()) {
401             case GetLocal:
402                 return node;
403             case SetLocal:
404                 return node->child1().node();
405             default:
406                 break;
407             }
408         } else
409             variable = newVariableAccessData(operand, isCaptured);
410         
411         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
412         m_currentBlock->variablesAtTail.argument(argument) = node;
413         return node;
414     }
415     Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
416     {
417         unsigned argument = operand.toArgument();
418         ASSERT(argument < m_numArguments);
419         
420         bool isCaptured = m_codeBlock->isCaptured(operand);
421
422         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
423
424         // Always flush arguments, except for 'this'. If 'this' is created by us,
425         // then make sure that it's never unboxed.
426         if (argument) {
427             if (setMode != ImmediateNakedSet)
428                 flushDirect(operand);
429         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
430             variableAccessData->mergeShouldNeverUnbox(true);
431         
432         variableAccessData->mergeStructureCheckHoistingFailed(
433             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
434         variableAccessData->mergeCheckArrayHoistingFailed(
435             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
436         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
437         m_currentBlock->variablesAtTail.argument(argument) = node;
438         return node;
439     }
440     
441     ArgumentPosition* findArgumentPositionForArgument(int argument)
442     {
443         InlineStackEntry* stack = m_inlineStackTop;
444         while (stack->m_inlineCallFrame)
445             stack = stack->m_caller;
446         return stack->m_argumentPositions[argument];
447     }
448     
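    // Walk the inline stack to see whether this local is really a non-'this' argument
    // slot of some inline call frame; if so, return that argument's position tracker.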
449     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
450     {
451         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
452             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
453             if (!inlineCallFrame)
454                 break;
455             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
456                 continue;
457             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
458                 continue;
459             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
460                 continue;
461             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
462             return stack->m_argumentPositions[argument];
463         }
464         return 0;
465     }
466     
467     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
468     {
469         if (operand.isArgument())
470             return findArgumentPositionForArgument(operand.toArgument());
471         return findArgumentPositionForLocal(operand);
472     }
473
474     void flush(VirtualRegister operand)
475     {
476         flushDirect(m_inlineStackTop->remapOperand(operand));
477     }
478     
479     void flushDirect(VirtualRegister operand)
480     {
481         flushDirect(operand, findArgumentPosition(operand));
482     }
483     
484     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
485     {
486         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
487         
488         ASSERT(!operand.isConstant());
489         
490         Node* node = m_currentBlock->variablesAtTail.operand(operand);
491         
492         VariableAccessData* variable;
493         
494         if (node) {
495             variable = node->variableAccessData();
496             variable->mergeIsCaptured(isCaptured);
497         } else
498             variable = newVariableAccessData(operand, isCaptured);
499         
500         node = addToGraph(Flush, OpInfo(variable));
501         m_currentBlock->variablesAtTail.operand(operand) = node;
502         if (argumentPosition)
503             argumentPosition->addVariable(variable);
504     }
505     
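    // Emit Flush nodes for the callee and scope chain (for closure calls), for every
    // argument except 'this', and for each captured local of the given inline stack entry.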
506     void flush(InlineStackEntry* inlineStackEntry)
507     {
508         int numArguments;
509         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
510             numArguments = inlineCallFrame->arguments.size();
511             if (inlineCallFrame->isClosureCall) {
512                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
513                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
514             }
515         } else
516             numArguments = inlineStackEntry->m_codeBlock->numParameters();
517         for (unsigned argument = numArguments; argument-- > 1;)
518             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
519         for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
520             if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
521                 continue;
522             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
523         }
524     }
525
526     void flushForTerminal()
527     {
528         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
529             flush(inlineStackEntry);
530     }
531
532     void flushForReturn()
533     {
534         flush(m_inlineStackTop);
535     }
536     
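    // A switch whose fall-through and case targets all jump backwards will never fall
    // through to the next instruction, so treat it like a terminal and flush everything.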
537     void flushIfTerminal(SwitchData& data)
538     {
539         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
540             return;
541         
542         for (unsigned i = data.cases.size(); i--;) {
543             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
544                 return;
545         }
546         
547         flushForTerminal();
548     }
549
550     // Assumes that the constant should be strongly marked.
551     Node* jsConstant(JSValue constantValue)
552     {
553         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
554     }
555
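    // Like jsConstant(), but only weakly freezes the value (m_graph.freeze() rather than
    // freezeStrong()).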
556     Node* weakJSConstant(JSValue constantValue)
557     {
558         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
559     }
560
561     // Helper functions to get/set the this value.
562     Node* getThis()
563     {
564         return get(m_inlineStackTop->m_codeBlock->thisRegister());
565     }
566
567     void setThis(Node* value)
568     {
569         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
570     }
571
572     InlineCallFrame* inlineCallFrame()
573     {
574         return m_inlineStackTop->m_inlineCallFrame;
575     }
576
577     CodeOrigin currentCodeOrigin()
578     {
579         return CodeOrigin(m_currentIndex, inlineCallFrame());
580     }
581     
582     BranchData* branchData(unsigned taken, unsigned notTaken)
583     {
584         // We assume that branches originating from bytecode always have a fall-through. We
585         // use this assumption to avoid checking for the creation of terminal blocks.
586         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
587         BranchData* data = m_graph.m_branchData.add();
588         *data = BranchData::withBytecodeIndices(taken, notTaken);
589         return data;
590     }
591     
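    // Convenience wrappers that allocate a node with the current code origin and append
    // it to the block under construction.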
592     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
593     {
594         Node* result = m_graph.addNode(
595             SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
596             Edge(child3));
597         ASSERT(op != Phi);
598         m_currentBlock->append(result);
599         return result;
600     }
601     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
602     {
603         Node* result = m_graph.addNode(
604             SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
605         ASSERT(op != Phi);
606         m_currentBlock->append(result);
607         return result;
608     }
609     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
610     {
611         Node* result = m_graph.addNode(
612             SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
613             Edge(child3));
614         ASSERT(op != Phi);
615         m_currentBlock->append(result);
616         return result;
617     }
618     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
619     {
620         Node* result = m_graph.addNode(
621             SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
622             Edge(child1), Edge(child2), Edge(child3));
623         ASSERT(op != Phi);
624         m_currentBlock->append(result);
625         return result;
626     }
627     
628     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
629     {
630         Node* result = m_graph.addNode(
631             SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
632             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
633         ASSERT(op != Phi);
634         m_currentBlock->append(result);
635         
636         m_numPassedVarArgs = 0;
637         
638         return result;
639     }
640
641     void addVarArgChild(Node* child)
642     {
643         m_graph.m_varArgChildren.append(Edge(child));
644         m_numPassedVarArgs++;
645     }
646     
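    // Build the vararg children for a call-like node: the callee first, then the
    // arguments (skipping 'this' for constructs), while also growing m_parameterSlots to
    // cover this call's outgoing frame.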
647     Node* addCallWithoutSettingResult(
648         NodeType op, Node* callee, int argCount, int registerOffset,
649         SpeculatedType prediction)
650     {
651         addVarArgChild(callee);
652         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
653         if (parameterSlots > m_parameterSlots)
654             m_parameterSlots = parameterSlots;
655
656         int dummyThisArgument = op == Call || op == NativeCall ? 0 : 1;
657         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
658             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
659
660         return addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
661     }
662     
663     Node* addCall(
664         int result, NodeType op, Node* callee, int argCount, int registerOffset,
665         SpeculatedType prediction)
666     {
667         Node* call = addCallWithoutSettingResult(
668             op, callee, argCount, registerOffset, prediction);
669         VirtualRegister resultReg(result);
670         if (resultReg.isValid())
671             set(VirtualRegister(result), call);
672         return call;
673     }
674     
675     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
676     {
677         Node* objectNode = weakJSConstant(object);
678         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
679         return objectNode;
680     }
681     
682     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
683     {
684         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
685         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
686     }
687
688     SpeculatedType getPrediction(unsigned bytecodeIndex)
689     {
690         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
691         
692         if (prediction == SpecNone) {
693             // We have no information about what values this node generates. Give up
694             // on executing this code, since we're likely to do more damage than good.
695             addToGraph(ForceOSRExit);
696         }
697         
698         return prediction;
699     }
700     
701     SpeculatedType getPredictionWithoutOSRExit()
702     {
703         return getPredictionWithoutOSRExit(m_currentIndex);
704     }
705     
706     SpeculatedType getPrediction()
707     {
708         return getPrediction(m_currentIndex);
709     }
710     
711     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
712     {
713         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
714         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
715         return ArrayMode::fromObserved(locker, profile, action, false);
716     }
717     
718     ArrayMode getArrayMode(ArrayProfile* profile)
719     {
720         return getArrayMode(profile, Array::Read);
721     }
722     
723     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
724     {
725         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
726         
727         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
728         
729         bool makeSafe =
730             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
731             || profile->outOfBounds(locker);
732         
733         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
734         
735         return result;
736     }
737     
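    // Decorate an arithmetic node with overflow / negative-zero flags, based on prior
    // OSR exits and on how often the baseline JIT took the slow path here.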
738     Node* makeSafe(Node* node)
739     {
740         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
741             node->mergeFlags(NodeMayOverflowInDFG);
742         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
743             node->mergeFlags(NodeMayNegZeroInDFG);
744         
745         if (!isX86() && node->op() == ArithMod)
746             return node;
747
748         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
749             return node;
750         
751         switch (node->op()) {
752         case UInt32ToNumber:
753         case ArithAdd:
754         case ArithSub:
755         case ValueAdd:
756         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
757             node->mergeFlags(NodeMayOverflowInBaseline);
758             break;
759             
760         case ArithNegate:
761             // Currently we can't tell the difference between a negation overflowing
762             // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the
763             // slow path then we assume that it did both of those things.
764             node->mergeFlags(NodeMayOverflowInBaseline);
765             node->mergeFlags(NodeMayNegZeroInBaseline);
766             break;
767
768         case ArithMul:
769             // FIXME: We should detect cases where we only overflowed but never created
770             // negative zero.
771             // https://bugs.webkit.org/show_bug.cgi?id=132470
772             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
773                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
774                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
775             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
776                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
777                 node->mergeFlags(NodeMayNegZeroInBaseline);
778             break;
779             
780         default:
781             RELEASE_ASSERT_NOT_REACHED();
782             break;
783         }
784         
785         return node;
786     }
787     
788     Node* makeDivSafe(Node* node)
789     {
790         ASSERT(node->op() == ArithDiv);
791         
792         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
793             node->mergeFlags(NodeMayOverflowInDFG);
794         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
795             node->mergeFlags(NodeMayNegZeroInDFG);
796         
797         // The main slow case counter for op_div in the old JIT counts only when
798         // the operands are not numbers. We don't care about that since we already
799         // have speculations in place that take care of that separately. We only
800         // care about when the outcome of the division is not an integer, which
801         // is what the special fast case counter tells us.
802         
803         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
804             return node;
805         
806         // FIXME: It might be possible to make this more granular.
807         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
808         
809         return node;
810     }
811     
812     void buildOperandMapsIfNecessary();
813     
814     VM* m_vm;
815     CodeBlock* m_codeBlock;
816     CodeBlock* m_profiledBlock;
817     Graph& m_graph;
818
819     // The current block being generated.
820     BasicBlock* m_currentBlock;
821     // The bytecode index of the current instruction being generated.
822     unsigned m_currentIndex;
823
824     FrozenValue* m_constantUndefined;
825     FrozenValue* m_constantNull;
826     FrozenValue* m_constantNaN;
827     FrozenValue* m_constantOne;
828     Vector<Node*, 16> m_constants;
829
830     // The number of arguments passed to the function.
831     unsigned m_numArguments;
832     // The number of locals (vars + temporaries) used in the function.
833     unsigned m_numLocals;
834     // The number of slots (in units of sizeof(Register)) that we need to
835     // preallocate for arguments to outgoing calls from this frame. This
836     // number includes the CallFrame slots that we initialize for the callee
837     // (but not the callee-initialized CallerFrame and ReturnPC slots).
838     // This number is 0 if and only if this function is a leaf.
839     unsigned m_parameterSlots;
840     // The number of var args passed to the next var arg node.
841     unsigned m_numPassedVarArgs;
842
843     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
844     
845     Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
846     
847     struct InlineStackEntry {
848         ByteCodeParser* m_byteCodeParser;
849         
850         CodeBlock* m_codeBlock;
851         CodeBlock* m_profiledBlock;
852         InlineCallFrame* m_inlineCallFrame;
853         
854         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
855         
856         QueryableExitProfile m_exitProfile;
857         
858         // Remapping of identifier and constant numbers from the code block being
859         // inlined (inline callee) to the code block that we're inlining into
860         // (the machine code block, which is the transitive, though not necessarily
861         // direct, caller).
862         Vector<unsigned> m_identifierRemap;
863         Vector<unsigned> m_constantBufferRemap;
864         Vector<unsigned> m_switchRemap;
865         
866         // Blocks introduced by this code block, which need successor linking.
867         // May include up to one basic block that includes the continuation after
868         // the callsite in the caller. These must be appended in the order that they
869         // are created, but their bytecodeBegin values need not be in order as they
870         // are ignored.
871         Vector<UnlinkedBlock> m_unlinkedBlocks;
872         
873         // Potential block linking targets. Must be sorted by bytecodeBegin, and
874         // cannot have two blocks that have the same bytecodeBegin. For this very
875         // reason, this is not equivalent to m_unlinkedBlocks.
876         Vector<BasicBlock*> m_blockLinkingTargets;
877         
878         // If the callsite's basic block was split into two, then this will be
879         // the head of the callsite block. It needs its successors linked to the
880         // m_unlinkedBlocks, but not the other way around: there's no way for
881         // any blocks in m_unlinkedBlocks to jump back into this block.
882         BasicBlock* m_callsiteBlockHead;
883         
884         // Does the callsite block head need linking? This is typically true
885         // but will be false for the machine code block's inline stack entry
886         // (since that one is not inlined) and for cases where an inline callee
887         // did the linking for us.
888         bool m_callsiteBlockHeadNeedsLinking;
889         
890         VirtualRegister m_returnValue;
891         
892         // Speculations about variable types collected from the profiled code block,
893         // which are based on OSR exit profiles that past DFG compilations of this
894         // code block had gathered.
895         LazyOperandValueProfileParser m_lazyOperands;
896         
897         CallLinkInfoMap m_callLinkInfos;
898         StubInfoMap m_stubInfos;
899         
900         // Did we see any returns? We need to handle the (uncommon but necessary)
901         // case where a procedure that does not return was inlined.
902         bool m_didReturn;
903         
904         // Did we have any early returns?
905         bool m_didEarlyReturn;
906         
907         // Pointers to the argument position trackers for this slice of code.
908         Vector<ArgumentPosition*> m_argumentPositions;
909         
910         InlineStackEntry* m_caller;
911         
912         InlineStackEntry(
913             ByteCodeParser*,
914             CodeBlock*,
915             CodeBlock* profiledBlock,
916             BasicBlock* callsiteBlockHead,
917             JSFunction* callee, // Null if this is a closure call.
918             VirtualRegister returnValueVR,
919             VirtualRegister inlineCallFrameStart,
920             int argumentCountIncludingThis,
921             InlineCallFrame::Kind);
922         
923         ~InlineStackEntry()
924         {
925             m_byteCodeParser->m_inlineStackTop = m_caller;
926         }
927         
928         VirtualRegister remapOperand(VirtualRegister operand) const
929         {
930             if (!m_inlineCallFrame)
931                 return operand;
932             
933             ASSERT(!operand.isConstant());
934
935             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
936         }
937     };
938     
939     InlineStackEntry* m_inlineStackTop;
940     
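    // A SetLocal that has been deferred until the start of the next code origin; the
    // MovHint for it has already been emitted by setDirect().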
941     struct DelayedSetLocal {
942         VirtualRegister m_operand;
943         Node* m_value;
944         
945         DelayedSetLocal() { }
946         DelayedSetLocal(VirtualRegister operand, Node* value)
947             : m_operand(operand)
948             , m_value(value)
949         {
950         }
951         
952         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
953         {
954             if (m_operand.isArgument())
955                 return parser->setArgument(m_operand, m_value, setMode);
956             return parser->setLocal(m_operand, m_value, setMode);
957         }
958     };
959     
960     Vector<DelayedSetLocal, 2> m_setLocalQueue;
961
962     // Have we built operand maps? We initialize them lazily, and only when doing
963     // inlining.
964     bool m_haveBuiltOperandMaps;
965     // Mapping between identifier names and numbers.
966     BorrowedIdentifierMap m_identifierMap;
967     
968     CodeBlock* m_dfgCodeBlock;
969     CallLinkStatus::ContextMap m_callContextMap;
970     StubInfoMap m_dfgStubInfos;
971     
972     Instruction* m_currentInstruction;
973 };
974
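// Both macros advance m_currentIndex past the opcode just parsed; NEXT_OPCODE continues
// the enclosing parse loop, while LAST_OPCODE returns shouldContinueParsing to end the
// current block.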
975 #define NEXT_OPCODE(name) \
976     m_currentIndex += OPCODE_LENGTH(name); \
977     continue
978
979 #define LAST_OPCODE(name) \
980     m_currentIndex += OPCODE_LENGTH(name); \
981     return shouldContinueParsing
982
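// The op_call / op_construct operand layout assumed here is: pc[1] = result register,
// pc[2] = callee, pc[3] = argument count including 'this', pc[4] = register offset
// (negated before being passed on).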
983 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
984 {
985     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
986     handleCall(
987         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
988         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
989 }
990
991 void ByteCodeParser::handleCall(
992     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
993     int callee, int argumentCountIncludingThis, int registerOffset)
994 {
995     Node* callTarget = get(VirtualRegister(callee));
996     
997     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
998         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
999         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1000     
1001     handleCall(
1002         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1003         argumentCountIncludingThis, registerOffset, callLinkStatus);
1004 }
1005     
1006 void ByteCodeParser::handleCall(
1007     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1008     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1009     CallLinkStatus callLinkStatus)
1010 {
1011     handleCall(
1012         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1013         registerOffset, callLinkStatus, getPrediction());
1014 }
1015
1016 void ByteCodeParser::handleCall(
1017     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1018     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1019     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1020 {
1021     ASSERT(registerOffset <= 0);
1022     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1023     
1024     if (callTarget->hasConstant())
1025         callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
1026     
1027     if (!callLinkStatus.canOptimize()) {
1028         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1029         // that we cannot optimize them.
1030         
1031         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1032         return;
1033     }
1034     
1035     unsigned nextOffset = m_currentIndex + instructionSize;
1036
1037     if (InternalFunction* function = callLinkStatus.internalFunction()) {
1038         if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, specializationKind)) {
1039             // This phantoming has to be *after* the code for the intrinsic, to signify that
1040             // the inputs must be kept alive whatever exits the intrinsic may do.
1041             addToGraph(Phantom, callTarget);
1042             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1043             return;
1044         }
1045         
1046         // Can only handle this using the generic call handler.
1047         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1048         return;
1049     }
1050         
1051     Intrinsic intrinsic = callLinkStatus.intrinsicFor(specializationKind);
1052
1053     JSFunction* knownFunction = nullptr;
1054     if (intrinsic != NoIntrinsic) {
1055         emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind);
1056             
1057         if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1058             // This phantoming has to be *after* the code for the intrinsic, to signify that
1059             // the inputs must be kept alive whatever exits the intrinsic may do.
1060             addToGraph(Phantom, callTarget);
1061             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1062             if (m_graph.compilation())
1063                 m_graph.compilation()->noticeInlinedCall();
1064             return;
1065         }
1066     } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
1067         if (m_graph.compilation())
1068             m_graph.compilation()->noticeInlinedCall();
1069         return;
1070     } else if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls()) {
1071         JSFunction* function = callLinkStatus.function();
1072         if (function && function->isHostFunction()) {
1073             emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind);
1074             knownFunction = function;
1075
1076             if (op == Call) 
1077                 op = NativeCall;
1078             else {
1079                 ASSERT(op == Construct);
1080                 op = NativeConstruct;
1081             }
1082         }
1083     }
1084     Node* call = addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1085
1086     if (knownFunction) 
1087         call->giveKnownFunction(knownFunction);
1088 }
1089
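// Emit whatever speculation checks are needed to guarantee that the call target really is
// the function (or at least the structure/executable pair) that profiling observed. If the
// status is already proved, only Phantoms are emitted to keep the inputs alive.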
1090 void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
1091 {
1092     Node* thisArgument;
1093     if (kind == CodeForCall)
1094         thisArgument = get(virtualRegisterForArgument(0, registerOffset));
1095     else
1096         thisArgument = 0;
1097
1098     if (callLinkStatus.isProved()) {
1099         addToGraph(Phantom, callTarget, thisArgument);
1100         return;
1101     }
1102     
1103     ASSERT(callLinkStatus.canOptimize());
1104     
1105     if (JSFunction* function = callLinkStatus.function())
1106         addToGraph(CheckFunction, OpInfo(m_graph.freeze(function)), callTarget, thisArgument);
1107     else {
1108         ASSERT(callLinkStatus.structure());
1109         ASSERT(callLinkStatus.executable());
1110         
1111         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
1112         addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
1113     }
1114 }
1115
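// When a call is replaced by inlined or intrinsic code, emit a Phantom for each argument so
// the arguments stay alive across any exits that code may take.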
1116 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
1117 {
1118     for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
1119         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1120 }
1121
1122 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind)
1123 {
1124     static const bool verbose = false;
1125     
1126     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1127     
1128     if (verbose)
1129         dataLog("Considering inlining ", callLinkStatus, " into ", currentCodeOrigin(), "\n");
1130     
1131     // First, the really simple checks: do we have an actual JS function?
1132     if (!callLinkStatus.executable()) {
1133         if (verbose)
1134             dataLog("    Failing because there is no executable.\n");
1135         return false;
1136     }
1137     if (callLinkStatus.executable()->isHostFunction()) {
1138         if (verbose)
1139             dataLog("    Failing because it's a host function.\n");
1140         return false;
1141     }
1142     
1143     FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
1144     
1145     // Does the number of arguments we're passing match the arity of the target? We currently
1146     // inline only if the number of arguments passed is greater than or equal to the number
1148     // of arguments expected.
1148     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1149         if (verbose)
1150             dataLog("    Failing because of arity mismatch.\n");
1151         return false;
1152     }
1153     
1154     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1155     // being an inline candidate? We might not have a code block if code was thrown away or if we
1156     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1157     // if we had a static proof of what was being called; this might happen for example if you call a
1158     // global function, where watchpointing gives us static information. Overall, it's a rare case
1159     // because we expect that any hot callees would have already been compiled.
1160     CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
1161     if (!codeBlock) {
1162         if (verbose)
1163             dataLog("    Failing because no code block available.\n");
1164         return false;
1165     }
1166     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1167         codeBlock, specializationKind, callLinkStatus.isClosureCall());
1168     if (!canInline(capabilityLevel)) {
1169         if (verbose)
1170             dataLog("    Failing because the function is not inlineable.\n");
1171         return false;
1172     }
1173     
1174     // Check if the caller is already too large. We do this check here because that's just
1175     // where we happen to also have the callee's code block, and we want that for the
1176     // purpose of unsetting SABI.
1177     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1178         codeBlock->m_shouldAlwaysBeInlined = false;
1179         if (verbose)
1180             dataLog("    Failing because the caller is too large.\n");
1181         return false;
1182     }
1183     
1184     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1185     // this function.
1186     // https://bugs.webkit.org/show_bug.cgi?id=127627
1187     
1188     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1189     // too many levels? If either of these is detected, then don't inline. We adjust our
1190     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1191     
1192     unsigned depth = 0;
1193     unsigned recursion = 0;
1194     
1195     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1196         ++depth;
1197         if (depth >= Options::maximumInliningDepth()) {
1198             if (verbose)
1199                 dataLog("    Failing because depth exceeded.\n");
1200             return false;
1201         }
1202         
1203         if (entry->executable() == executable) {
1204             ++recursion;
1205             if (recursion >= Options::maximumInliningRecursion()) {
1206                 if (verbose)
1207                     dataLog("    Failing because recursion detected.\n");
1208                 return false;
1209             }
1210         }
1211     }
1212     
1213     if (verbose)
1214         dataLog("    Committing to inlining.\n");
1215     
1216     // Now we know without a doubt that we are committed to inlining. So begin the process
1217     // by checking the callee (if necessary) and making sure that arguments and the callee
1218     // are flushed.
1219     emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, specializationKind);
1220     
1221     // FIXME: Don't flush constants!
1222     
1223     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1224     
1225     ensureLocals(
1226         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1227         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1228     
1229     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1230
1231     VirtualRegister resultReg(resultOperand);
1232     if (resultReg.isValid())
1233         resultReg = m_inlineStackTop->remapOperand(resultReg);
1234     
1235     InlineStackEntry inlineStackEntry(
1236         this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(), resultReg,
1237         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1238     
1239     // This is where the actual inlining really happens.
1240     unsigned oldIndex = m_currentIndex;
1241     m_currentIndex = 0;
1242
1243     InlineVariableData inlineVariableData;
1244     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1245     inlineVariableData.argumentPositionStart = argumentPositionStart;
1246     inlineVariableData.calleeVariable = 0;
1247     
1248     RELEASE_ASSERT(
1249         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1250         == callLinkStatus.isClosureCall());
1251     if (callLinkStatus.isClosureCall()) {
1252         VariableAccessData* calleeVariable =
1253             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1254         VariableAccessData* scopeVariable =
1255             set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData();
1256         
1257         calleeVariable->mergeShouldNeverUnbox(true);
1258         scopeVariable->mergeShouldNeverUnbox(true);
1259         
1260         inlineVariableData.calleeVariable = calleeVariable;
1261     }
1262     
1263     m_graph.m_inlineVariableData.append(inlineVariableData);
1264     
1265     parseCodeBlock();
1266     prepareToParseBlock(); // Reset our state now that we're back to the outer code.
1267     
1268     m_currentIndex = oldIndex;
1269     
1270     // If the inlined code created some new basic blocks, then we have linking to do.
1271     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1272         
1273         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1274         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1275             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1276         else
1277             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1278         
1279         // It's possible that the callsite block head is not owned by the caller.
1280         if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
1281             // It's definitely owned by the caller, because the caller created new blocks.
1282             // Assert that this all adds up.
1283             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
1284             ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
1285             inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1286         } else {
1287             // It's definitely not owned by the caller. Tell the caller that he does not
1288             // need to link his callsite block head, because we did it for him.
1289             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
1290             ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
1291             inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
1292         }
1293         
1294         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1295     } else
1296         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1297     
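         // What happens next depends on how the inlined code ended. If it performed a
         // normal return and no early returns, we simply keep parsing the caller in the
         // block we are in now. Otherwise every block ends in a terminal, so we create a
         // fresh continuation block in the caller and point the early-return jumps at it.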
1298     BasicBlock* lastBlock = m_graph.lastBlock();
1299     // If there was a return, but no early returns, then we're done. We allow parsing of
1300     // the caller to continue in whatever basic block we're in right now.
1301     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1302         ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
1303         
1304         // If we created new blocks then the last block needs linking, but in the
1305         // caller: nothing needs to link to it, but it needs outgoing links.
1306         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1307             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1308             // for release builds because this block will never serve as a potential target
1309             // in the linker's binary search.
1310             lastBlock->bytecodeBegin = m_currentIndex;
1311             m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1312         }
1313         
1314         m_currentBlock = m_graph.lastBlock();
1315         return true;
1316     }
1317     
1318     // If we get to this point then all blocks must end in some sort of terminal.
1319     ASSERT(lastBlock->last()->isTerminal());
1320     
1321
1322     // Need to create a new basic block for the continuation at the caller.
1323     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1324
1325     // Link the early returns to the basic block we're about to create.
1326     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1327         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1328             continue;
1329         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1330         ASSERT(!blockToLink->isLinked);
1331         Node* node = blockToLink->last();
1332         ASSERT(node->op() == Jump);
1333         ASSERT(!node->targetBlock());
1334         node->targetBlock() = block.get();
1335         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1336 #if !ASSERT_DISABLED
1337         blockToLink->isLinked = true;
1338 #endif
1339     }
1340     
1341     m_currentBlock = block.get();
1342     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1343     m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1344     m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1345     m_graph.appendBlock(block);
1346     prepareToParseBlock();
1347     
1348     // At this point we return and continue to generate code for the caller, but
1349     // in the new basic block.
1350     return true;
1351 }
1352
1353 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1354 {
1355     if (argumentCountIncludingThis == 1) { // Math.min() / Math.max() with no arguments.
1356         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1357         return true;
1358     }
1359      
1360     if (argumentCountIncludingThis == 2) { // Math.min(x) / Math.max(x)
1361         Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
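             // The Phantom with a NumberUse edge keeps this sound: Math.min(x)/Math.max(x)
             // must behave as if x were converted to a number, so we speculate that x is
             // already a number and OSR exit if that turns out to be false, rather than
             // passing a non-number value straight through.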
1362         addToGraph(Phantom, Edge(result, NumberUse));
1363         set(VirtualRegister(resultOperand), result);
1364         return true;
1365     }
1366     
1367     if (argumentCountIncludingThis == 3) { // Math.min(x, y) / Math.max(x, y)
1368         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1369         return true;
1370     }
1371     
1372     // Don't handle >=3 arguments for now.
1373     return false;
1374 }
1375
1376 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1377 {
1378     switch (intrinsic) {
1379     case AbsIntrinsic: {
1380         if (argumentCountIncludingThis == 1) { // Math.abs()
1381             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1382             return true;
1383         }
1384
1385         if (!MacroAssembler::supportsFloatingPointAbs())
1386             return false;
1387
1388         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1389         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1390             node->mergeFlags(NodeMayOverflowInDFG);
1391         set(VirtualRegister(resultOperand), node);
1392         return true;
1393     }
1394
1395     case MinIntrinsic:
1396         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1397         
1398     case MaxIntrinsic:
1399         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1400         
1401     case SqrtIntrinsic:
1402     case CosIntrinsic:
1403     case SinIntrinsic: {
1404         if (argumentCountIncludingThis == 1) {
1405             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1406             return true;
1407         }
1408         
1409         switch (intrinsic) {
1410         case SqrtIntrinsic:
1411             if (!MacroAssembler::supportsFloatingPointSqrt())
1412                 return false;
1413             
1414             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1415             return true;
1416             
1417         case CosIntrinsic:
1418             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1419             return true;
1420             
1421         case SinIntrinsic:
1422             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1423             return true;
1424             
1425         default:
1426             RELEASE_ASSERT_NOT_REACHED();
1427             return false;
1428         }
1429     }
1430         
1431     case ArrayPushIntrinsic: {
1432         if (argumentCountIncludingThis != 2)
1433             return false;
1434         
1435         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1436         if (!arrayMode.isJSArray())
1437             return false;
1438         switch (arrayMode.type()) {
1439         case Array::Undecided:
1440         case Array::Int32:
1441         case Array::Double:
1442         case Array::Contiguous:
1443         case Array::ArrayStorage: {
1444             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1445             set(VirtualRegister(resultOperand), arrayPush);
1446             
1447             return true;
1448         }
1449             
1450         default:
1451             return false;
1452         }
1453     }
1454         
1455     case ArrayPopIntrinsic: {
1456         if (argumentCountIncludingThis != 1)
1457             return false;
1458         
1459         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1460         if (!arrayMode.isJSArray())
1461             return false;
1462         switch (arrayMode.type()) {
1463         case Array::Int32:
1464         case Array::Double:
1465         case Array::Contiguous:
1466         case Array::ArrayStorage: {
1467             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1468             set(VirtualRegister(resultOperand), arrayPop);
1469             return true;
1470         }
1471             
1472         default:
1473             return false;
1474         }
1475     }
1476
1477     case CharCodeAtIntrinsic: {
1478         if (argumentCountIncludingThis != 2)
1479             return false;
1480
1481         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1482         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1483         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1484
1485         set(VirtualRegister(resultOperand), charCode);
1486         return true;
1487     }
1488
1489     case CharAtIntrinsic: {
1490         if (argumentCountIncludingThis != 2)
1491             return false;
1492
1493         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1494         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1495         Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1496
1497         set(VirtualRegister(resultOperand), charCode);
1498         return true;
1499     }
1500     case FromCharCodeIntrinsic: {
1501         if (argumentCountIncludingThis != 2)
1502             return false;
1503
1504         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1505         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
1506
1507         set(VirtualRegister(resultOperand), charCode);
1508
1509         return true;
1510     }
1511
1512     case RegExpExecIntrinsic: {
1513         if (argumentCountIncludingThis != 2)
1514             return false;
1515         
1516         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1517         set(VirtualRegister(resultOperand), regExpExec);
1518         
1519         return true;
1520     }
1521         
1522     case RegExpTestIntrinsic: {
1523         if (argumentCountIncludingThis != 2)
1524             return false;
1525         
1526         Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1527         set(VirtualRegister(resultOperand), regExpExec);
1528         
1529         return true;
1530     }
1531
1532     case IMulIntrinsic: {
1533         if (argumentCountIncludingThis != 3)
1534             return false;
1535         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
1536         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
1537         Node* left = get(leftOperand);
1538         Node* right = get(rightOperand);
1539         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
1540         return true;
1541     }
1542         
1543     case FRoundIntrinsic: {
1544         if (argumentCountIncludingThis != 2)
1545             return false;
1546         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1547         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
1548         return true;
1549     }
1550         
1551     case DFGTrueIntrinsic: {
1552         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
1553         return true;
1554     }
1555         
1556     case OSRExitIntrinsic: {
1557         addToGraph(ForceOSRExit);
1558         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1559         return true;
1560     }
1561         
1562     case IsFinalTierIntrinsic: {
1563         set(VirtualRegister(resultOperand),
1564             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
1565         return true;
1566     }
1567         
1568     case SetInt32HeapPredictionIntrinsic: {
1569         for (int i = 1; i < argumentCountIncludingThis; ++i) {
1570             Node* node = get(virtualRegisterForArgument(i, registerOffset));
1571             if (node->hasHeapPrediction())
1572                 node->setHeapPrediction(SpecInt32);
1573         }
1574         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1575         return true;
1576     }
1577         
1578     case FiatInt52Intrinsic: {
1579         if (argumentCountIncludingThis != 2)
1580             return false;
1581         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1582         if (enableInt52())
1583             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
1584         else
1585             set(VirtualRegister(resultOperand), get(operand));
1586         return true;
1587     }
1588         
1589     default:
1590         return false;
1591     }
1592 }
1593
1594 bool ByteCodeParser::handleTypedArrayConstructor(
1595     int resultOperand, InternalFunction* function, int registerOffset,
1596     int argumentCountIncludingThis, TypedArrayType type)
1597 {
1598     if (!isTypedView(type))
1599         return false;
1600     
1601     if (function->classInfo() != constructorClassInfoForType(type))
1602         return false;
1603     
1604     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1605         return false;
1606     
1607     // We only have an intrinsic for the case where you say:
1608     //
1609     // new FooArray(blah);
1610     //
1611     // Of course, 'blah' could be any of the following:
1612     //
1613     // - Integer, indicating that you want to allocate an array of that length.
1614     //   This is the thing we're hoping for, and what we can actually do meaningful
1615     //   optimizations for.
1616     //
1617     // - Array buffer, indicating that you want to create a view onto that _entire_
1618     //   buffer.
1619     //
1620     // - Non-buffer object, indicating that you want to create a copy of that
1621     //   object by pretending that it quacks like an array.
1622     //
1623     // - Anything else, indicating that you want to have an exception thrown at
1624     //   you.
1625     //
1626     // The intrinsic, NewTypedArray, will behave as if it could do any of these
1627     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
1628     // predicted Int32, then we lock it in as a normal typed array allocation.
1629     // Otherwise, NewTypedArray turns into a totally opaque function call that
1630     // may clobber the world - by virtue of it accessing properties on what could
1631     // be an object.
1632     //
1633     // Note that although the generic form of NewTypedArray sounds sort of awful,
1634     // it is actually quite likely to be more efficient than a fully generic
1635     // Construct. So, we might want to think about making NewTypedArray variadic,
1636     // or else making Construct not super slow.
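         // Illustrative examples (using Float64Array; any typed array constructor behaves the
         // same way, and 'buffer' here just stands for some ArrayBuffer):
         //
         //     new Float64Array(100);       // integer length: the case we can optimize
         //     new Float64Array(buffer);    // view onto the entire buffer
         //     new Float64Array([1, 2, 3]); // copy of something that quacks like an array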
1637     
1638     if (argumentCountIncludingThis != 2)
1639         return false;
1640     
1641     set(VirtualRegister(resultOperand),
1642         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
1643     return true;
1644 }
1645
1646 bool ByteCodeParser::handleConstantInternalFunction(
1647     int resultOperand, InternalFunction* function, int registerOffset,
1648     int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
1649 {
1650     // If we ever find that we have a lot of internal functions that we specialize for,
1651     // then we should probably have some sort of hashtable dispatch, or maybe even
1652     // dispatch straight through the MethodTable of the InternalFunction. But for now,
1653     // it seems that this case is hit infrequently enough, and the number of functions
1654     // we know about is small enough, that having just a linear cascade of if statements
1655     // is good enough.
1656     
1657     UNUSED_PARAM(prediction); // Remove this once we do more things.
1658     
1659     if (function->classInfo() == ArrayConstructor::info()) {
1660         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1661             return false;
1662         
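             // new Array(len) maps to NewArrayWithSize; any other argument count (including
             // none) maps to a vararg NewArray over the arguments.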
1663         if (argumentCountIncludingThis == 2) {
1664             set(VirtualRegister(resultOperand),
1665                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
1666             return true;
1667         }
1668         
1669         for (int i = 1; i < argumentCountIncludingThis; ++i)
1670             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
1671         set(VirtualRegister(resultOperand),
1672             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
1673         return true;
1674     }
1675     
1676     if (function->classInfo() == StringConstructor::info()) {
1677         Node* result;
1678         
1679         if (argumentCountIncludingThis <= 1)
1680             result = jsConstant(m_vm->smallStrings.emptyString());
1681         else
1682             result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
1683         
1684         if (kind == CodeForConstruct)
1685             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
1686         
1687         set(VirtualRegister(resultOperand), result);
1688         return true;
1689     }
1690     
1691     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
1692         bool result = handleTypedArrayConstructor(
1693             resultOperand, function, registerOffset, argumentCountIncludingThis,
1694             indexToTypedArrayType(typeIndex));
1695         if (result)
1696             return true;
1697     }
1698     
1699     return false;
1700 }
1701
1702 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
1703 {
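         // Fast path: when the base is a compile-time constant, tryGetConstantProperty() may
         // be able to prove which value lives at this offset for the given structure set. If
         // so, fold the load to a weak constant and just keep the base alive with a Phantom.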
1704     if (base->hasConstant()) {
1705         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
1706             addToGraph(Phantom, base);
1707             return weakJSConstant(constant);
1708         }
1709     }
1710     
1711     Node* propertyStorage;
1712     if (isInlineOffset(offset))
1713         propertyStorage = base;
1714     else
1715         propertyStorage = addToGraph(GetButterfly, base);
1716     Node* getByOffset = addToGraph(op, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);
1717
1718     StorageAccessData storageAccessData;
1719     storageAccessData.offset = offset;
1720     storageAccessData.identifierNumber = identifierNumber;
1721     m_graph.m_storageAccessData.append(storageAccessData);
1722
1723     return getByOffset;
1724 }
1725
1726 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
1727 {
1728     Node* propertyStorage;
1729     if (isInlineOffset(offset))
1730         propertyStorage = base;
1731     else
1732         propertyStorage = addToGraph(GetButterfly, base);
1733     Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
1734     
1735     StorageAccessData storageAccessData;
1736     storageAccessData.offset = offset;
1737     storageAccessData.identifierNumber = identifier;
1738     m_graph.m_storageAccessData.append(storageAccessData);
1739
1740     return result;
1741 }
1742
1743 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
1744 {
1745     for (unsigned i = 0; i < vector.size(); ++i)
1746         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
1747 }
1748
1749 void ByteCodeParser::handleGetById(
1750     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
1751     const GetByIdStatus& getByIdStatus)
1752 {
1753     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
1754     
1755     if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
1756         set(VirtualRegister(destinationOperand),
1757             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
1758         return;
1759     }
1760     
1761     if (getByIdStatus.numVariants() > 1) {
1762         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
1763             || !Options::enablePolymorphicAccessInlining()) {
1764             set(VirtualRegister(destinationOperand),
1765                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
1766             return;
1767         }
1768         
1769         if (m_graph.compilation())
1770             m_graph.compilation()->noticeInlinedGetById();
1771     
1772         // 1) Emit prototype structure checks for all chains. This may not be optimal if
1773         //    there is some rarely executed case in the chain that requires a lot of checks
1774         //    and those checks are not watchpointable.
1775         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
1776             emitChecks(getByIdStatus[variantIndex].constantChecks());
1777         
1778         // 2) Emit a MultiGetByOffset
1779         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
1780         data->variants = getByIdStatus.variants();
1781         data->identifierNumber = identifierNumber;
1782         set(VirtualRegister(destinationOperand),
1783             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
1784         return;
1785     }
1786     
1787     ASSERT(getByIdStatus.numVariants() == 1);
1788     GetByIdVariant variant = getByIdStatus[0];
1789                 
1790     if (m_graph.compilation())
1791         m_graph.compilation()->noticeInlinedGetById();
1792     
1793     Node* originalBase = base;
1794                 
1795     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
1796     
1797     emitChecks(variant.constantChecks());
1798
1799     if (variant.alternateBase())
1800         base = weakJSConstant(variant.alternateBase());
1801     
1802     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
1803     // ensure that the base of the original get_by_id is kept alive until we're done with
1804     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
1805     // on something other than the base following the CheckStructure on base.
1806     if (originalBase != base)
1807         addToGraph(Phantom, originalBase);
1808     
1809     Node* loadedValue = handleGetByOffset(
1810         variant.callLinkStatus() ? SpecCellOther : prediction,
1811         base, variant.baseStructure(), identifierNumber, variant.offset(),
1812         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
1813     
1814     if (!variant.callLinkStatus()) {
1815         set(VirtualRegister(destinationOperand), loadedValue);
1816         return;
1817     }
1818     
1819     Node* getter = addToGraph(GetGetter, loadedValue);
1820     
1821     // Make a call. We don't try to get fancy with using the smallest operand number because
1822     // the stack layout phase should compress the stack anyway.
1823     
1824     unsigned numberOfParameters = 0;
1825     numberOfParameters++; // The 'this' argument.
1826     numberOfParameters++; // True return PC.
1827     
1828     // Start with a register offset that corresponds to the last in-use register.
1829     int registerOffset = virtualRegisterForLocal(
1830         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
1831     registerOffset -= numberOfParameters;
1832     registerOffset -= JSStack::CallFrameHeaderSize;
1833     
1834     // Get the alignment right.
1835     registerOffset = -WTF::roundUpToMultipleOf(
1836         stackAlignmentRegisters(),
1837         -registerOffset);
1838     
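         // At this point registerOffset is the base of a synthetic call frame for the getter,
         // placed just past the last register the caller is using and then pushed further
         // down as needed to respect stack alignment.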
1839     ensureLocals(
1840         m_inlineStackTop->remapOperand(
1841             VirtualRegister(registerOffset)).toLocal());
1842     
1843     // Issue SetLocals. This has two effects:
1844     // 1) That's how handleCall() sees the arguments.
1845     // 2) If we inline then this ensures that the arguments are flushed so that if you use
1846     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
1847     //    since we only really care about 'this' in this case. But we're not going to take that
1848     //    shortcut.
1849     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
1850     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
1851     
1852     handleCall(
1853         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
1854         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
1855 }
1856
1857 void ByteCodeParser::emitPutById(
1858     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
1859 {
1860     if (isDirect)
1861         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
1862     else
1863         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
1864 }
1865
1866 void ByteCodeParser::handlePutById(
1867     Node* base, unsigned identifierNumber, Node* value,
1868     const PutByIdStatus& putByIdStatus, bool isDirect)
1869 {
1870     if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
1871         if (!putByIdStatus.isSet())
1872             addToGraph(ForceOSRExit);
1873         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
1874         return;
1875     }
1876     
1877     if (putByIdStatus.numVariants() > 1) {
1878         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
1879             || !Options::enablePolymorphicAccessInlining()) {
1880             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
1881             return;
1882         }
1883         
1884         if (m_graph.compilation())
1885             m_graph.compilation()->noticeInlinedPutById();
1886         
1887         if (!isDirect) {
1888             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
1889                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
1890                     continue;
1891                 emitChecks(putByIdStatus[variantIndex].constantChecks());
1892             }
1893         }
1894         
1895         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
1896         data->variants = putByIdStatus.variants();
1897         data->identifierNumber = identifierNumber;
1898         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
1899         return;
1900     }
1901     
1902     ASSERT(putByIdStatus.numVariants() == 1);
1903     const PutByIdVariant& variant = putByIdStatus[0];
1904     
1905     switch (variant.kind()) {
1906     case PutByIdVariant::Replace: {
1907         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
1908         handlePutByOffset(base, identifierNumber, variant.offset(), value);
1909         if (m_graph.compilation())
1910             m_graph.compilation()->noticeInlinedPutById();
1911         return;
1912     }
1913     
1914     case PutByIdVariant::Transition: {
1915         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
1916         emitChecks(variant.constantChecks());
1917
1918         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
1919     
1920         Node* propertyStorage;
1921         Transition* transition = m_graph.m_transitions.add(
1922             variant.oldStructureForTransition(), variant.newStructure());
1923
1924         if (variant.reallocatesStorage()) {
1925
1926             // If we're growing the property storage then it must be because we're
1927             // storing into the out-of-line storage.
1928             ASSERT(!isInlineOffset(variant.offset()));
1929
1930             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
1931                 propertyStorage = addToGraph(
1932                     AllocatePropertyStorage, OpInfo(transition), base);
1933             } else {
1934                 propertyStorage = addToGraph(
1935                     ReallocatePropertyStorage, OpInfo(transition),
1936                     base, addToGraph(GetButterfly, base));
1937             }
1938         } else {
1939             if (isInlineOffset(variant.offset()))
1940                 propertyStorage = base;
1941             else
1942                 propertyStorage = addToGraph(GetButterfly, base);
1943         }
1944
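             // Note the order of effects: any new out-of-line storage is allocated first, the
             // structure is changed next, and the value is stored last, presumably so the
             // object is never observed with a structure that promises storage it does not
             // yet have.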
1945         addToGraph(PutStructure, OpInfo(transition), base);
1946
1947         addToGraph(
1948             PutByOffset,
1949             OpInfo(m_graph.m_storageAccessData.size()),
1950             propertyStorage,
1951             base,
1952             value);
1953
1954         StorageAccessData storageAccessData;
1955         storageAccessData.offset = variant.offset();
1956         storageAccessData.identifierNumber = identifierNumber;
1957         m_graph.m_storageAccessData.append(storageAccessData);
1958
1959         if (m_graph.compilation())
1960             m_graph.compilation()->noticeInlinedPutById();
1961         return;
1962     }
1963         
1964     case PutByIdVariant::Setter: {
1965         Node* originalBase = base;
1966         
1967         addToGraph(
1968             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
1969         
1970         emitChecks(variant.constantChecks());
1971         
1972         if (variant.alternateBase())
1973             base = weakJSConstant(variant.alternateBase());
1974         
1975         Node* loadedValue = handleGetByOffset(
1976             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
1977             GetGetterSetterByOffset);
1978         
1979         Node* setter = addToGraph(GetSetter, loadedValue);
1980         
1981         // Make a call. We don't try to get fancy with using the smallest operand number because
1982         // the stack layout phase should compress the stack anyway.
1983     
1984         unsigned numberOfParameters = 0;
1985         numberOfParameters++; // The 'this' argument.
1986         numberOfParameters++; // The new value.
1987         numberOfParameters++; // True return PC.
1988     
1989         // Start with a register offset that corresponds to the last in-use register.
1990         int registerOffset = virtualRegisterForLocal(
1991             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
1992         registerOffset -= numberOfParameters;
1993         registerOffset -= JSStack::CallFrameHeaderSize;
1994     
1995         // Get the alignment right.
1996         registerOffset = -WTF::roundUpToMultipleOf(
1997             stackAlignmentRegisters(),
1998             -registerOffset);
1999     
2000         ensureLocals(
2001             m_inlineStackTop->remapOperand(
2002                 VirtualRegister(registerOffset)).toLocal());
2003     
2004         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2005         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2006         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2007     
2008         handleCall(
2009             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2010             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2011             *variant.callLinkStatus(), SpecOther);
2012         return;
2013     }
2014     
2015     default: {
2016         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2017         return;
2018     } }
2019 }
2020
2021 void ByteCodeParser::prepareToParseBlock()
2022 {
2023     m_constants.resize(0);
2024 }
2025
2026 Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
2027 {
2028     Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
2029     if (skipTop) {
2030         ASSERT(!inlineCallFrame());
2031         localBase = addToGraph(SkipTopScope, localBase);
2032     }
2033     for (unsigned n = skipCount; n--;)
2034         localBase = addToGraph(SkipScope, localBase);
2035     return localBase;
2036 }
2037
2038 bool ByteCodeParser::parseBlock(unsigned limit)
2039 {
2040     bool shouldContinueParsing = true;
2041
2042     Interpreter* interpreter = m_vm->interpreter;
2043     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2044     unsigned blockBegin = m_currentIndex;
2045     
2046     // If we are the first basic block, introduce markers for arguments. This allows
2047     // us to track if a use of an argument may use the actual argument passed, as
2048     // opposed to using a value we set explicitly.
2049     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2050         m_graph.m_arguments.resize(m_numArguments);
2051         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2052             VariableAccessData* variable = newVariableAccessData(
2053                 virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
2054             variable->mergeStructureCheckHoistingFailed(
2055                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2056             variable->mergeCheckArrayHoistingFailed(
2057                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2058             
2059             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2060             m_graph.m_arguments[argument] = setArgument;
2061             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2062         }
2063     }
2064
2065     while (true) {
2066         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
2067             m_setLocalQueue[i].execute(this);
2068         m_setLocalQueue.resize(0);
2069         
2070         // Don't extend over jump destinations.
2071         if (m_currentIndex == limit) {
2072             // Ordinarily we want to plant a jump. But refuse to do this if the block is
2073             // empty. This is a special case for inlining, which might otherwise create
2074             // some empty blocks in some cases. When parseBlock() returns with an empty
2075             // block, it will get repurposed instead of creating a new one. Note that this
2076             // logic relies on every bytecode resulting in one or more nodes, which would
2077             // be true anyway except for op_loop_hint, which emits a Phantom to force this
2078             // to be true.
2079             if (!m_currentBlock->isEmpty())
2080                 addToGraph(Jump, OpInfo(m_currentIndex));
2081             return shouldContinueParsing;
2082         }
2083         
2084         // Switch on the current bytecode opcode.
2085         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2086         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2087         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2088         
2089         if (Options::verboseDFGByteCodeParsing())
2090             dataLog("    parsing ", currentCodeOrigin(), "\n");
2091         
2092         if (m_graph.compilation()) {
2093             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2094                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2095         }
2096         
2097         switch (opcodeID) {
2098
2099         // === Function entry opcodes ===
2100
2101         case op_enter: {
2102             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2103             // Initialize all locals to undefined.
2104             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2105                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2106             if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
2107                 set(virtualRegisterForArgument(0), undefined, ImmediateNakedSet);
2108             NEXT_OPCODE(op_enter);
2109         }
2110             
2111         case op_touch_entry:
2112             if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
2113                 addToGraph(ForceOSRExit);
2114             NEXT_OPCODE(op_touch_entry);
2115             
2116         case op_to_this: {
2117             Node* op1 = getThis();
2118             if (op1->op() != ToThis) {
2119                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2120                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2121                     || !cachedStructure
2122                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2123                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2124                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2125                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2126                     setThis(addToGraph(ToThis, op1));
2127                 } else {
2128                     addToGraph(
2129                         CheckStructure,
2130                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2131                         op1);
2132                 }
2133             }
2134             NEXT_OPCODE(op_to_this);
2135         }
2136
2137         case op_create_this: {
2138             int calleeOperand = currentInstruction[2].u.operand;
2139             Node* callee = get(VirtualRegister(calleeOperand));
2140             bool alreadyEmitted = false;
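                 // If the callee is a known JSFunction with a stable allocation structure, we
                 // can allocate the object directly with NewObject; the AllocationProfileWatchpoint
                 // lets the compiled code be invalidated if that profile ever changes.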
2141             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2142                 if (Structure* structure = function->allocationStructure()) {
2143                     addToGraph(AllocationProfileWatchpoint, OpInfo(m_graph.freeze(function)));
2144                     // The callee is still live up to this point.
2145                     addToGraph(Phantom, callee);
2146                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2147                     alreadyEmitted = true;
2148                 }
2149             }
2150             if (!alreadyEmitted) {
2151                 set(VirtualRegister(currentInstruction[1].u.operand),
2152                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2153             }
2154             NEXT_OPCODE(op_create_this);
2155         }
2156
2157         case op_new_object: {
2158             set(VirtualRegister(currentInstruction[1].u.operand),
2159                 addToGraph(NewObject,
2160                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2161             NEXT_OPCODE(op_new_object);
2162         }
2163             
2164         case op_new_array: {
2165             int startOperand = currentInstruction[2].u.operand;
2166             int numOperands = currentInstruction[3].u.operand;
2167             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2168             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2169                 addVarArgChild(get(VirtualRegister(operandIdx)));
2170             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2171             NEXT_OPCODE(op_new_array);
2172         }
2173             
2174         case op_new_array_with_size: {
2175             int lengthOperand = currentInstruction[2].u.operand;
2176             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2177             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2178             NEXT_OPCODE(op_new_array_with_size);
2179         }
2180             
2181         case op_new_array_buffer: {
2182             int startConstant = currentInstruction[2].u.operand;
2183             int numConstants = currentInstruction[3].u.operand;
2184             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2185             NewArrayBufferData data;
2186             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2187             data.numConstants = numConstants;
2188             data.indexingType = profile->selectIndexingType();
2189
2190             // If this statement has never executed, we'll have the wrong indexing type in the profile.
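                 // For example, a constant buffer like [1.5, 2.5] must be at least ArrayWithDouble
                 // even if the profile still claims Int32, so widen the indexing type by each
                 // constant actually in the buffer.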
2191             for (int i = 0; i < numConstants; ++i) {
2192                 data.indexingType =
2193                     leastUpperBoundOfIndexingTypeAndValue(
2194                         data.indexingType,
2195                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2196             }
2197             
2198             m_graph.m_newArrayBufferData.append(data);
2199             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2200             NEXT_OPCODE(op_new_array_buffer);
2201         }
2202             
2203         case op_new_regexp: {
2204             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2205             NEXT_OPCODE(op_new_regexp);
2206         }
2207             
2208         case op_get_callee: {
2209             JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
2210             if (!cachedFunction 
2211                 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2212                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
2213                 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
2214             } else {
2215                 FrozenValue* frozen = m_graph.freeze(cachedFunction);
2216                 ASSERT(cachedFunction->inherits(JSFunction::info()));
2217                 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
2218                 addToGraph(CheckFunction, OpInfo(frozen), actualCallee);
2219                 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2220             }
2221             NEXT_OPCODE(op_get_callee);
2222         }
2223
2224         // === Bitwise operations ===
2225
2226         case op_bitand: {
2227             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2228             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2229             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2230             NEXT_OPCODE(op_bitand);
2231         }
2232
2233         case op_bitor: {
2234             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2235             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2236             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2237             NEXT_OPCODE(op_bitor);
2238         }
2239
2240         case op_bitxor: {
2241             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2242             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2243             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2244             NEXT_OPCODE(op_bitxor);
2245         }
2246
2247         case op_rshift: {
2248             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2249             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2250             set(VirtualRegister(currentInstruction[1].u.operand),
2251                 addToGraph(BitRShift, op1, op2));
2252             NEXT_OPCODE(op_rshift);
2253         }
2254
2255         case op_lshift: {
2256             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2257             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2258             set(VirtualRegister(currentInstruction[1].u.operand),
2259                 addToGraph(BitLShift, op1, op2));
2260             NEXT_OPCODE(op_lshift);
2261         }
2262
2263         case op_urshift: {
2264             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2265             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2266             set(VirtualRegister(currentInstruction[1].u.operand),
2267                 addToGraph(BitURShift, op1, op2));
2268             NEXT_OPCODE(op_urshift);
2269         }
2270             
2271         case op_unsigned: {
2272             set(VirtualRegister(currentInstruction[1].u.operand),
2273                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2274             NEXT_OPCODE(op_unsigned);
2275         }
2276
2277         // === Increment/Decrement opcodes ===
2278
2279         case op_inc: {
2280             int srcDst = currentInstruction[1].u.operand;
2281             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2282             Node* op = get(srcDstVirtualRegister);
2283             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2284             NEXT_OPCODE(op_inc);
2285         }
2286
2287         case op_dec: {
2288             int srcDst = currentInstruction[1].u.operand;
2289             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2290             Node* op = get(srcDstVirtualRegister);
2291             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2292             NEXT_OPCODE(op_dec);
2293         }
2294
2295         // === Arithmetic operations ===
2296
2297         case op_add: {
2298             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2299             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2300             if (op1->hasNumberResult() && op2->hasNumberResult())
2301                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2302             else
2303                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2304             NEXT_OPCODE(op_add);
2305         }
2306
2307         case op_sub: {
2308             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2309             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2310             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2311             NEXT_OPCODE(op_sub);
2312         }
2313
2314         case op_negate: {
2315             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2316             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2317             NEXT_OPCODE(op_negate);
2318         }
2319
2320         case op_mul: {
2321             // Multiply requires that the inputs are not truncated, unfortunately.
2322             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2323             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2324             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2325             NEXT_OPCODE(op_mul);
2326         }
2327
2328         case op_mod: {
2329             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2330             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2331             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2332             NEXT_OPCODE(op_mod);
2333         }
2334
2335         case op_div: {
2336             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2337             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2338             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2339             NEXT_OPCODE(op_div);
2340         }
2341
2342         // === Misc operations ===
2343
2344         case op_debug:
2345             addToGraph(Breakpoint);
2346             NEXT_OPCODE(op_debug);
2347
2348         case op_profile_will_call: {
2349             addToGraph(ProfileWillCall);
2350             NEXT_OPCODE(op_profile_will_call);
2351         }
2352
2353         case op_profile_did_call: {
2354             addToGraph(ProfileDidCall);
2355             NEXT_OPCODE(op_profile_did_call);
2356         }
2357
2358         case op_mov: {
2359             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2360             set(VirtualRegister(currentInstruction[1].u.operand), op);
2361             NEXT_OPCODE(op_mov);
2362         }
2363             
2364         case op_captured_mov: {
2365             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2366             if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
2367                 if (set->state() != IsInvalidated)
2368                     addToGraph(NotifyWrite, OpInfo(set), op);
2369             }
2370             set(VirtualRegister(currentInstruction[1].u.operand), op);
2371             NEXT_OPCODE(op_captured_mov);
2372         }
2373
2374         case op_check_has_instance:
2375             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2376             NEXT_OPCODE(op_check_has_instance);
2377
2378         case op_instanceof: {
2379             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2380             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2381             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2382             NEXT_OPCODE(op_instanceof);
2383         }
2384             
2385         case op_is_undefined: {
2386             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2387             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2388             NEXT_OPCODE(op_is_undefined);
2389         }
2390
2391         case op_is_boolean: {
2392             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2393             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2394             NEXT_OPCODE(op_is_boolean);
2395         }
2396
2397         case op_is_number: {
2398             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2399             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2400             NEXT_OPCODE(op_is_number);
2401         }
2402
2403         case op_is_string: {
2404             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2405             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2406             NEXT_OPCODE(op_is_string);
2407         }
2408
2409         case op_is_object: {
2410             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2411             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2412             NEXT_OPCODE(op_is_object);
2413         }
2414
2415         case op_is_function: {
2416             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2417             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2418             NEXT_OPCODE(op_is_function);
2419         }
2420
2421         case op_not: {
2422             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2423             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2424             NEXT_OPCODE(op_not);
2425         }
2426             
2427         case op_to_primitive: {
2428             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2429             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2430             NEXT_OPCODE(op_to_primitive);
2431         }
2432             
2433         case op_strcat: {
2434             int startOperand = currentInstruction[2].u.operand;
2435             int numOperands = currentInstruction[3].u.operand;
2436 #if CPU(X86)
2437             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2438             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2439             const unsigned maxRopeArguments = 2;
2440 #else
2441             const unsigned maxRopeArguments = 3;
2442 #endif
2443             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2444             for (int i = 0; i < numOperands; i++)
2445                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2446
2447             for (int i = 0; i < numOperands; i++)
2448                 addToGraph(Phantom, toStringNodes[i]);
2449
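                 // Fold the strings into MakeRope nodes, at most maxRopeArguments at a time.
                 // For example, with five operands and maxRopeArguments == 3 this produces
                 // MakeRope(MakeRope(s0, s1, s2), s3, s4).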
2450             Node* operands[AdjacencyList::Size];
2451             unsigned indexInOperands = 0;
2452             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2453                 operands[i] = 0;
2454             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2455                 if (indexInOperands == maxRopeArguments) {
2456                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2457                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2458                         operands[i] = 0;
2459                     indexInOperands = 1;
2460                 }
2461                 
2462                 ASSERT(indexInOperands < AdjacencyList::Size);
2463                 ASSERT(indexInOperands < maxRopeArguments);
2464                 operands[indexInOperands++] = toStringNodes[operandIdx];
2465             }
2466             set(VirtualRegister(currentInstruction[1].u.operand),
2467                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2468             NEXT_OPCODE(op_strcat);
2469         }
2470
2471         case op_less: {
2472             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2473             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2474             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2475             NEXT_OPCODE(op_less);
2476         }
2477
2478         case op_lesseq: {
2479             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2480             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2481             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
2482             NEXT_OPCODE(op_lesseq);
2483         }
2484
2485         case op_greater: {
2486             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2487             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2488             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
2489             NEXT_OPCODE(op_greater);
2490         }
2491
2492         case op_greatereq: {
2493             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2494             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2495             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
2496             NEXT_OPCODE(op_greatereq);
2497         }
2498
2499         case op_eq: {
2500             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2501             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2502             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
2503             NEXT_OPCODE(op_eq);
2504         }
2505
2506         case op_eq_null: {
2507             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2508             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
2509             NEXT_OPCODE(op_eq_null);
2510         }
2511
2512         case op_stricteq: {
2513             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2514             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2515             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
2516             NEXT_OPCODE(op_stricteq);
2517         }
2518
2519         case op_neq: {
2520             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2521             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2522             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2523             NEXT_OPCODE(op_neq);
2524         }
2525
2526         case op_neq_null: {
2527             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2528             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
2529             NEXT_OPCODE(op_neq_null);
2530         }
2531
2532         case op_nstricteq: {
2533             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2534             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2535             Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
2537             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
2538             NEXT_OPCODE(op_nstricteq);
2539         }
2540
2541         // === Property access operations ===
2542
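        // get_by_val and put_by_val consult the baseline array profile to pick an ArrayMode
        // (contiguous, double, array storage, typed array, and so on), which determines how
        // GetByVal/PutByVal are lowered and which checks they require.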
2543         case op_get_by_val: {
2544             SpeculatedType prediction = getPredictionWithoutOSRExit();
2545             
2546             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2547             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
2548             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
2549             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2550             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
2551
2552             NEXT_OPCODE(op_get_by_val);
2553         }
2554
2555         case op_put_by_val_direct:
2556         case op_put_by_val: {
2557             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2558
2559             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
2560             
2561             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
2562             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2563             
2564             addVarArgChild(base);
2565             addVarArgChild(property);
2566             addVarArgChild(value);
2567             addVarArgChild(0); // Leave room for property storage.
2568             addVarArgChild(0); // Leave room for length.
2569             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2570
2571             NEXT_OPCODE(op_put_by_val);
2572         }
2573             
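        // For get_by_id, the parser combines profiling from the baseline code block (and any
        // stub infos left over from a previous DFG compile) into a GetByIdStatus; handleGetById
        // then chooses between an inlined, structure-checked load and a generic GetById.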
2574         case op_get_by_id:
2575         case op_get_by_id_out_of_line:
2576         case op_get_array_length: {
2577             SpeculatedType prediction = getPrediction();
2578             
2579             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2580             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2581             
2582             StringImpl* uid = m_graph.identifiers()[identifierNumber];
2583             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2584                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2585                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2586                 currentCodeOrigin(), uid);
2587             
2588             handleGetById(
2589                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2590
2591             NEXT_OPCODE(op_get_by_id);
2592         }
2593         case op_put_by_id:
2594         case op_put_by_id_out_of_line:
2595         case op_put_by_id_transition_direct:
2596         case op_put_by_id_transition_normal:
2597         case op_put_by_id_transition_direct_out_of_line:
2598         case op_put_by_id_transition_normal_out_of_line: {
2599             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2600             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2601             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2602             bool direct = currentInstruction[8].u.operand;
2603
2604             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2605                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2606                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2607                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
2608             
2609             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
2610             NEXT_OPCODE(op_put_by_id);
2611         }
2612
2613         case op_init_global_const_nop: {
2614             NEXT_OPCODE(op_init_global_const_nop);
2615         }
2616
2617         case op_init_global_const: {
2618             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2619             addToGraph(
2620                 PutGlobalVar,
2621                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2622                 value);
2623             NEXT_OPCODE(op_init_global_const);
2624         }
2625
2626         // === Block terminators. ===
2627
2628         case op_jmp: {
2629             int relativeOffset = currentInstruction[1].u.operand;
2630             if (relativeOffset <= 0)
2631                 flushForTerminal();
2632             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2633             LAST_OPCODE(op_jmp);
2634         }
2635
2636         case op_jtrue: {
2637             unsigned relativeOffset = currentInstruction[2].u.operand;
2638             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2639             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
2640             LAST_OPCODE(op_jtrue);
2641         }
2642
2643         case op_jfalse: {
2644             unsigned relativeOffset = currentInstruction[2].u.operand;
2645             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2646             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
2647             LAST_OPCODE(op_jfalse);
2648         }
2649
2650         case op_jeq_null: {
2651             unsigned relativeOffset = currentInstruction[2].u.operand;
2652             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2653             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2654             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
2655             LAST_OPCODE(op_jeq_null);
2656         }
2657
2658         case op_jneq_null: {
2659             unsigned relativeOffset = currentInstruction[2].u.operand;
2660             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2661             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2662             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
2663             LAST_OPCODE(op_jneq_null);
2664         }
2665
2666         case op_jless: {
2667             unsigned relativeOffset = currentInstruction[3].u.operand;
2668             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2669             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2670             Node* condition = addToGraph(CompareLess, op1, op2);
2671             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
2672             LAST_OPCODE(op_jless);
2673         }
2674
2675         case op_jlesseq: {
2676             unsigned relativeOffset = currentInstruction[3].u.operand;
2677             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2678             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2679             Node* condition = addToGraph(CompareLessEq, op1, op2);
2680             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
2681             LAST_OPCODE(op_jlesseq);
2682         }
2683
2684         case op_jgreater: {
2685             unsigned relativeOffset = currentInstruction[3].u.operand;
2686             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2687             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2688             Node* condition = addToGraph(CompareGreater, op1, op2);
2689             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
2690             LAST_OPCODE(op_jgreater);
2691         }
2692
2693         case op_jgreatereq: {
2694             unsigned relativeOffset = currentInstruction[3].u.operand;
2695             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2696             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2697             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2698             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
2699             LAST_OPCODE(op_jgreatereq);
2700         }
2701
2702         case op_jnless: {
2703             unsigned relativeOffset = currentInstruction[3].u.operand;
2704             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2705             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2706             Node* condition = addToGraph(CompareLess, op1, op2);
2707             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
2708             LAST_OPCODE(op_jnless);
2709         }
2710
2711         case op_jnlesseq: {
2712             unsigned relativeOffset = currentInstruction[3].u.operand;
2713             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2714             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2715             Node* condition = addToGraph(CompareLessEq, op1, op2);
2716             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
2717             LAST_OPCODE(op_jnlesseq);
2718         }
2719
2720         case op_jngreater: {
2721             unsigned relativeOffset = currentInstruction[3].u.operand;
2722             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2723             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2724             Node* condition = addToGraph(CompareGreater, op1, op2);
2725             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
2726             LAST_OPCODE(op_jngreater);
2727         }
2728
2729         case op_jngreatereq: {
2730             unsigned relativeOffset = currentInstruction[3].u.operand;
2731             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2732             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2733             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2734             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
2735             LAST_OPCODE(op_jngreatereq);
2736         }
2737             
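        // The switch opcodes reuse the baseline jump tables: every target that differs from
        // the fall-through becomes a SwitchCase, and the resulting Switch node is lowered
        // later (for example as a table jump or a series of compares).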
2738         case op_switch_imm: {
2739             SwitchData& data = *m_graph.m_switchData.add();
2740             data.kind = SwitchImm;
2741             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
2742             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2743             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
2744             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
2745                 if (!table.branchOffsets[i])
2746                     continue;
2747                 unsigned target = m_currentIndex + table.branchOffsets[i];
2748                 if (target == data.fallThrough.bytecodeIndex())
2749                     continue;
2750                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
2751             }
2752             flushIfTerminal(data);
2753             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
2754             LAST_OPCODE(op_switch_imm);
2755         }
2756             
2757         case op_switch_char: {
2758             SwitchData& data = *m_graph.m_switchData.add();
2759             data.kind = SwitchChar;
2760             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
2761             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2762             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
2763             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
2764                 if (!table.branchOffsets[i])
2765                     continue;
2766                 unsigned target = m_currentIndex + table.branchOffsets[i];
2767                 if (target == data.fallThrough.bytecodeIndex())
2768                     continue;
2769                 data.cases.append(
2770                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
2771             }
2772             flushIfTerminal(data);
2773             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
2774             LAST_OPCODE(op_switch_char);
2775         }
2776
2777         case op_switch_string: {
2778             SwitchData& data = *m_graph.m_switchData.add();
2779             data.kind = SwitchString;
2780             data.switchTableIndex = currentInstruction[1].u.operand;
2781             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2782             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
2783             StringJumpTable::StringOffsetTable::iterator iter;
2784             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
2785             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
2786                 unsigned target = m_currentIndex + iter->value.branchOffset;
2787                 if (target == data.fallThrough.bytecodeIndex())
2788                     continue;
2789                 data.cases.append(
2790                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
2791             }
2792             flushIfTerminal(data);
2793             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
2794             LAST_OPCODE(op_switch_string);
2795         }
2796
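        // When we are parsing an inlined callee, op_ret does not emit a Return node. Instead
        // the return value is copied into the caller's result register, and for an early
        // return an extra Jump is emitted so that this block can later be linked to the
        // continuation in the caller.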
2797         case op_ret:
2798             flushForReturn();
2799             if (inlineCallFrame()) {
2800                 if (m_inlineStackTop->m_returnValue.isValid())
2801                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
2802                 m_inlineStackTop->m_didReturn = true;
2803                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2804                     // If we're returning from the first block, then we're done parsing.
2805                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
2806                     shouldContinueParsing = false;
2807                     LAST_OPCODE(op_ret);
2808                 } else {
2809                     // If inlining created blocks, and we're doing a return, then we need some
2810                     // special linking.
2811                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
2812                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2813                 }
2814                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2815                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2816                     addToGraph(Jump, OpInfo(0));
2817                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2818                     m_inlineStackTop->m_didEarlyReturn = true;
2819                 }
2820                 LAST_OPCODE(op_ret);
2821             }
2822             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
2823             LAST_OPCODE(op_ret);
2824             
2825         case op_end:
2826             flushForReturn();
2827             ASSERT(!inlineCallFrame());
2828             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
2829             LAST_OPCODE(op_end);
2830
2831         case op_throw:
2832             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
2833             flushForTerminal();
2834             addToGraph(Unreachable);
2835             LAST_OPCODE(op_throw);
2836             
2837         case op_throw_static_error:
2838             addToGraph(ThrowReferenceError);
2839             flushForTerminal();
2840             addToGraph(Unreachable);
2841             LAST_OPCODE(op_throw_static_error);
2842             
2843         case op_call:
2844             handleCall(currentInstruction, Call, CodeForCall);
2845             NEXT_OPCODE(op_call);
2846             
2847         case op_construct:
2848             handleCall(currentInstruction, Construct, CodeForConstruct);
2849             NEXT_OPCODE(op_construct);
2850             
2851         case op_call_varargs: {
2852             int result = currentInstruction[1].u.operand;
2853             int callee = currentInstruction[2].u.operand;
2854             int thisReg = currentInstruction[3].u.operand;
2855             int arguments = currentInstruction[4].u.operand;
2856             int firstFreeReg = currentInstruction[5].u.operand;
2857             
2858             ASSERT(inlineCallFrame());
2859             ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
2860             ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
2861
2862             addToGraph(CheckArgumentsNotCreated);
2863
2864             unsigned argCount = inlineCallFrame()->arguments.size();
2865             
2866             // Let's compute the register offset. We start with the last used register, and
2867             // then adjust for the things we want in the call frame.
2868             int registerOffset = firstFreeReg + 1;
2869             registerOffset -= argCount; // We will be passing some arguments.
2870             registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
2871             
2872             // Get the alignment right.
2873             registerOffset = -WTF::roundUpToMultipleOf(
2874                 stackAlignmentRegisters(),
2875                 -registerOffset);
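            // For illustration, if firstFreeReg were -20, argCount were 3, and
            // stackAlignmentRegisters() were 2, then registerOffset would start at -19, drop
            // to -22 to hold the arguments, drop by JSStack::CallFrameHeaderSize for the
            // header, and finally be rounded so that it is a multiple of 2, moving it further
            // from zero if necessary.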
2876
2877             ensureLocals(
2878                 m_inlineStackTop->remapOperand(
2879                     VirtualRegister(registerOffset)).toLocal());
2880             
2881             // The bytecode didn't set up the arguments for this varargs call, so we do it
2882             // here and make it look as if the bytecode had done it.
2883             int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2884             set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
2885             for (unsigned argument = 1; argument < argCount; ++argument)
2886                 set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
2887             
2888             handleCall(
2889                 result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
2890                 callee, argCount, registerOffset);
2891             NEXT_OPCODE(op_call_varargs);
2892         }
2893             
2894         case op_jneq_ptr:
2895             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2896             // support simmer for a while before making it more general, since it's
2897             // already gnarly enough as it is.
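            // Concretely, instead of a branch we plant a CheckFunction node, which OSR exits
            // if the value ever differs from the expected special pointer, followed by an
            // unconditional Jump to the not-taken successor; the taken target of the bytecode
            // branch is never compiled here.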
2898             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
2899             addToGraph(
2900                 CheckFunction,
2901                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
2902                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
2903                 get(VirtualRegister(currentInstruction[1].u.operand)));
2904             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
2905             LAST_OPCODE(op_jneq_ptr);
2906
2907         case op_resolve_scope: {
2908             int dst = currentInstruction[1].u.operand;
2909             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
2910             unsigned depth = currentInstruction[4].u.operand;
2911
2912             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
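            // ("Var injection" means something like an eval introducing a new variable into a
            // scope that this code assumed was static; firing that watchpoint jettisons the
            // compiled code, so the cases below can resolve as if it never happens.)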
2913             if (needsVarInjectionChecks(resolveType))
2914                 addToGraph(VarInjectionWatchpoint);
2915
2916             switch (resolveType) {
2917             case GlobalProperty:
2918             case GlobalVar:
2919             case GlobalPropertyWithVarInjectionChecks:
2920             case GlobalVarWithVarInjectionChecks:
2921                 set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
2922                 break;
2923             case ClosureVar:
2924             case ClosureVarWithVarInjectionChecks: {
2925                 JSActivation* activation = currentInstruction[5].u.activation.get();
2926                 if (activation
2927                     && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
2928                     addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
2929                     set(VirtualRegister(dst), weakJSConstant(activation));
2930                     break;
2931                 }
2932                 set(VirtualRegister(dst),
2933                     getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
2934                 break;
2935             }
2936             case Dynamic:
2937                 RELEASE_ASSERT_NOT_REACHED();
2938                 break;
2939             }
2940             NEXT_OPCODE(op_resolve_scope);
2941         }
2942
2943         case op_get_from_scope: {
2944             int dst = currentInstruction[1].u.operand;
2945             int scope = currentInstruction[2].u.operand;
2946             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2947             StringImpl* uid = m_graph.identifiers()[identifierNumber];
2948             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
2949
2950             Structure* structure = 0;
2951             WatchpointSet* watchpoints = 0;
2952             uintptr_t operand;
2953             {
2954                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
2955                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
2956                     watchpoints = currentInstruction[5].u.watchpointSet;
2957                 else
2958                     structure = currentInstruction[5].u.structure.get();
2959                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
2960             }
2961
2962             UNUSED_PARAM(watchpoints); // We will use this in the future. For now it just documents that index 5 holds the watchpoint set in GlobalVar mode.
2963
2964             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2965
2966             switch (resolveType) {
2967             case GlobalProperty:
2968             case GlobalPropertyWithVarInjectionChecks: {
2969                 SpeculatedType prediction = getPrediction();
2970                 GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
2971                 if (status.state() != GetByIdStatus::Simple
2972                     || status.numVariants() != 1
2973                     || status[0].structureSet().size() != 1) {
2974                     set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
2975                     break;
2976                 }
2977                 Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
2978                 addToGraph(Phantom, get(VirtualRegister(scope)));
2979                 set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
2980                 break;
2981             }
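            // For global vars: if the variable's watchpoint set still reports a single
            // inferred value, the load below is folded to a constant guarded by a
            // VariableWatchpoint node (so a later store jettisons this code); otherwise we
            // fall back to a plain GetGlobalVar.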
2982             case GlobalVar:
2983             case GlobalVarWithVarInjectionChecks: {
2984                 addToGraph(Phantom, get(VirtualRegister(scope)));
2985                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
2986                 VariableWatchpointSet* watchpointSet = entry.watchpointSet();
2987                 JSValue inferredValue =
2988                     watchpointSet ? watchpointSet->inferredValue() : JSValue();
2989                 if (!inferredValue) {
2990                     SpeculatedType prediction = getPrediction();
2991                     set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
2992                     break;
2993                 }
2994                 
2995                 addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
2996                 set(VirtualRegister(dst), weakJSConstant(inferredValue));
2997                 break;
2998             }
2999             case ClosureVar:
3000             case ClosureVarWithVarInjectionChecks: {
3001                 Node* scopeNode = get(VirtualRegister(scope));
3002                 if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
3003                     SymbolTable* symbolTable = activation->symbolTable();
3004                     ConcurrentJITLocker locker(symbolTable->m_lock);
3005                     SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
3006                     ASSERT(iter != symbolTable->end(locker));
3007                     VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
3008                     if (watchpointSet) {
3009                         if (JSValue value = watchpointSet->inferredValue()) {
3010                             addToGraph(Phantom, scopeNode);
3011                             addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
3012                             set(VirtualRegister(dst), weakJSConstant(value));
3013                             break;
3014                         }
3015                     }
3016                 }
3017                 SpeculatedType prediction = getPrediction();
3018                 set(VirtualRegister(dst),
3019                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), 
3020                         addToGraph(GetClosureRegisters, scopeNode)));
3021                 break;
3022             }
3023             case Dynamic:
3024                 RELEASE_ASSERT_NOT_REACHED();
3025                 break;
3026             }
3027             NEXT_OPCODE(op_get_from_scope);
3028         }
3029
3030         case op_put_to_scope: {
3031             unsigned scope = currentInstruction[1].u.operand;
3032             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3033             unsigned value = currentInstruction[3].u.operand;
3034             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3035             StringImpl* uid = m_graph.identifiers()[identifierNumber];
3036
3037             Structure* structure = 0;
3038             VariableWatchpointSet* watchpoints = 0;
3039             uintptr_t operand;
3040             {
3041                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3042                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
3043                     watchpoints = currentInstruction[5].u.watchpointSet;
3044                 else
3045                     structure = currentInstruction[5].u.structure.get();
3046                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3047             }
3048
3049             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3050
3051             switch (resolveType) {
3052             case GlobalProperty:
3053             case GlobalPropertyWithVarInjectionChecks: {
3054                 PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
3055                 if (status.numVariants() != 1
3056                     || status[0].kind() != PutByIdVariant::Replace
3057                     || status[0].structure().size() != 1) {
3058                     addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
3059                     break;
3060                 }
3061                 ASSERT(status[0].structure().onlyStructure() == structure);
3062                 Node* base = cellConstantWithStructureCheck(globalObject, structure);
3063                 addToGraph(Phantom, get(VirtualRegister(scope)));
3064                 handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
3065                 // Keep scope alive until after put.
3066                 addToGraph(Phantom, get(VirtualRegister(scope)));
3067                 break;
3068             }
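            // For global var stores, the PutGlobalVar below is paired with a NotifyWrite if
            // the variable's watchpoint set might still be valid, so that any code relying on
            // the old inferred value (including the constant folding in op_get_from_scope
            // above) gets invalidated.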
3069             case GlobalVar:
3070             case GlobalVarWithVarInjectionChecks: {
3071                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
3072                 ASSERT(watchpoints == entry.watchpointSet());
3073                 Node* valueNode = get(VirtualRegister(value));
3074                 addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
3075                 if (watchpoints->state() != IsInvalidated)
3076                     addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
3077                 // Keep scope alive until after put.
3078                 addToGraph(Phantom, get(VirtualRegister(scope)));
3079                 break;
3080             }
3081             case ClosureVar:
3082             case ClosureVarWithVarInjectionChecks: {
3083                 Node* scopeNode = get(VirtualRegister(scope));
3084                 Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
3085                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
3086                 break;
3087             }
3088             case Dynamic:
3089                 RELEASE_ASSERT_NOT_REACHED();
3090                 break;
3091             }
3092             NEXT_OPCODE(op_put_to_scope);
3093         }
3094
3095         case op_loop_hint: {
3096             // Baseline->DFG OSR entry happens at loop hints. The DFG assumes that Baseline->DFG
3097             // OSR can only happen at basic block boundaries. Assert that these two statements
3098             // are compatible.
3099             RELEASE_ASSERT(m_currentIndex == blockBegin);
3100             
3101             // We never do OSR into an inlined code block. That cannot happen, since OSR entry
3102             // looks up the code block that is the replacement for the baseline JIT code
3103             // block. Hence, machine code block = true code block = not an inlined code block.
3104             if (!m_inlineStackTop->m_caller)
3105                 m_currentBlock->isOSRTarget = true;
3106
3107             addToGraph(LoopHint);
3108             
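            // If a watchdog is active, also plant a check here so that a long-running loop
            // periodically gives the watchdog a chance to interrupt the script.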
3109             if (m_vm->watchdog && m_vm->watchdog->isEnabled())
3110                 addToGraph(CheckWatchdogTimer);
3111             
3112             NEXT_OPCODE(op_loop_hint);
3113         }
3114             
3115         case op_init_lazy_reg: {
3116             set(VirtualRegister(currentInstruction[1].u.operand), jsConstant(JSValue()));
3117             ASSERT(operandIsLocal(currentInstruction[1].u.operand));
3118             m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
3119             NEXT_OPCODE(op_init_lazy_reg);
3120         }