Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
1  /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "CallLinkStatus.h"
33 #include "CodeBlock.h"
34 #include "CodeBlockWithJITType.h"
35 #include "DFGArrayMode.h"
36 #include "DFGCapabilities.h"
37 #include "DFGJITCode.h"
38 #include "GetByIdStatus.h"
39 #include "Heap.h"
40 #include "JSLexicalEnvironment.h"
41 #include "JSCInlines.h"
42 #include "PreciseJumpTargets.h"
43 #include "PutByIdStatus.h"
44 #include "StackAlignment.h"
45 #include "StringConstructor.h"
46 #include <wtf/CommaPrinter.h>
47 #include <wtf/HashMap.h>
48 #include <wtf/MathExtras.h>
49 #include <wtf/StdLibExtras.h>
50
51 namespace JSC { namespace DFG {
52
53 static const bool verbose = false;
54
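// Identifies a constant buffer by the CodeBlock that owns it and the buffer's index
// within that block. Used as the key of m_constantBufferCache (below) so that constant
// buffers from inlined code blocks can be remapped into the machine code block.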
55 class ConstantBufferKey {
56 public:
57     ConstantBufferKey()
58         : m_codeBlock(0)
59         , m_index(0)
60     {
61     }
62     
63     ConstantBufferKey(WTF::HashTableDeletedValueType)
64         : m_codeBlock(0)
65         , m_index(1)
66     {
67     }
68     
69     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
70         : m_codeBlock(codeBlock)
71         , m_index(index)
72     {
73     }
74     
75     bool operator==(const ConstantBufferKey& other) const
76     {
77         return m_codeBlock == other.m_codeBlock
78             && m_index == other.m_index;
79     }
80     
81     unsigned hash() const
82     {
83         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
84     }
85     
86     bool isHashTableDeletedValue() const
87     {
88         return !m_codeBlock && m_index;
89     }
90     
91     CodeBlock* codeBlock() const { return m_codeBlock; }
92     unsigned index() const { return m_index; }
93     
94 private:
95     CodeBlock* m_codeBlock;
96     unsigned m_index;
97 };
98
99 struct ConstantBufferKeyHash {
100     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
101     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
102     {
103         return a == b;
104     }
105     
106     static const bool safeToCompareToEmptyOrDeleted = true;
107 };
108
109 } } // namespace JSC::DFG
110
111 namespace WTF {
112
113 template<typename T> struct DefaultHash;
114 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
115     typedef JSC::DFG::ConstantBufferKeyHash Hash;
116 };
117
118 template<typename T> struct HashTraits;
119 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
120
121 } // namespace WTF
122
123 namespace JSC { namespace DFG {
124
125 // === ByteCodeParser ===
126 //
127 // This class is used to compile the dataflow graph from a CodeBlock.
128 class ByteCodeParser {
129 public:
130     ByteCodeParser(Graph& graph)
131         : m_vm(&graph.m_vm)
132         , m_codeBlock(graph.m_codeBlock)
133         , m_profiledBlock(graph.m_profiledBlock)
134         , m_graph(graph)
135         , m_currentBlock(0)
136         , m_currentIndex(0)
137         , m_constantUndefined(graph.freeze(jsUndefined()))
138         , m_constantNull(graph.freeze(jsNull()))
139         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
140         , m_constantOne(graph.freeze(jsNumber(1)))
141         , m_numArguments(m_codeBlock->numParameters())
142         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
143         , m_parameterSlots(0)
144         , m_numPassedVarArgs(0)
145         , m_inlineStackTop(0)
146         , m_haveBuiltOperandMaps(false)
147         , m_currentInstruction(0)
148     {
149         ASSERT(m_profiledBlock);
150     }
151     
152     // Parse a full CodeBlock of bytecode.
153     bool parse();
154     
155 private:
156     struct InlineStackEntry;
157
158     // Just parse from m_currentIndex to the end of the current CodeBlock.
159     void parseCodeBlock();
160     
161     void ensureLocals(unsigned newNumLocals)
162     {
163         if (newNumLocals <= m_numLocals)
164             return;
165         m_numLocals = newNumLocals;
166         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
167             m_graph.block(i)->ensureLocals(newNumLocals);
168     }
169
170     // Helper for min and max.
171     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
172     
173     // Handle calls. This resolves issues surrounding inlining and intrinsics.
174     void handleCall(
175         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
176         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
177         SpeculatedType prediction);
178     void handleCall(
179         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
180         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
181     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
182     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
183     void emitFunctionChecks(CallVariant, Node* callTarget, int registerOffset, CodeSpecializationKind);
184     void undoFunctionChecks(CallVariant);
185     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
186     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
187     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
188     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
189     enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
190     bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance);
191     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability);
192     void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
193     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
194     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
195     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
196     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
197     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
198     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
199     void handleGetById(
200         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
201         const GetByIdStatus&);
202     void emitPutById(
203         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
204     void handlePutById(
205         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
206         bool isDirect);
207     void emitChecks(const ConstantStructureCheckVector&);
208
209     Node* getScope(unsigned skipCount);
210     
211     void prepareToParseBlock();
212     void clearCaches();
213
214     // Parse a single basic block of bytecode instructions.
215     bool parseBlock(unsigned limit);
216     // Link block successors.
217     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
218     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
219     
220     VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
221     {
222         ASSERT(!operand.isConstant());
223         
224         m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
225         return &m_graph.m_variableAccessData.last();
226     }
227     
228     // Get/Set the operands/result of a bytecode instruction.
229     Node* getDirect(VirtualRegister operand)
230     {
231         ASSERT(!operand.isConstant());
232
233         // Is this an argument?
234         if (operand.isArgument())
235             return getArgument(operand);
236
237         // Must be a local.
238         return getLocal(operand);
239     }
240
241     Node* get(VirtualRegister operand)
242     {
243         if (operand.isConstant()) {
244             unsigned constantIndex = operand.toConstantIndex();
245             unsigned oldSize = m_constants.size();
246             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
247                 JSValue value = m_inlineStackTop->m_codeBlock->getConstant(operand.offset());
248                 if (constantIndex >= oldSize) {
249                     m_constants.grow(constantIndex + 1);
250                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
251                         m_constants[i] = nullptr;
252                 }
253                 m_constants[constantIndex] =
254                     addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
255             }
256             ASSERT(m_constants[constantIndex]);
257             return m_constants[constantIndex];
258         }
259         
260         if (inlineCallFrame()) {
261             if (!inlineCallFrame()->isClosureCall) {
262                 JSFunction* callee = inlineCallFrame()->calleeConstant();
263                 if (operand.offset() == JSStack::Callee)
264                     return weakJSConstant(callee);
265                 if (operand.offset() == JSStack::ScopeChain)
266                     return weakJSConstant(callee->scope());
267             }
268         } else if (operand.offset() == JSStack::Callee)
269             return addToGraph(GetCallee);
270         else if (operand.offset() == JSStack::ScopeChain)
271             return addToGraph(GetMyScope);
272         
273         return getDirect(m_inlineStackTop->remapOperand(operand));
274     }
275     
276     enum SetMode {
277         // A normal set which follows a two-phase commit that spans code origins. During
278         // the current code origin it issues a MovHint, and at the start of the next
279         // code origin there will be a SetLocal. If the local needs flushing, the second
280         // SetLocal will be preceded with a Flush.
281         NormalSet,
282         
283         // A set where the SetLocal happens immediately and there is still a Flush. This
284         // is relevant when assigning to a local in tricky situations for the delayed
285         // SetLocal logic but where we know that we have not performed any side effects
286         // within this code origin. This is a safe replacement for NormalSet anytime we
287         // know that we have not yet performed side effects in this code origin.
288         ImmediateSetWithFlush,
289         
290         // A set where the SetLocal happens immediately and we do not Flush it even if
291         // this is a local that is marked as needing it. This is relevant when
292         // initializing locals at the top of a function.
293         ImmediateNakedSet
294     };
295     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
296     {
297         addToGraph(MovHint, OpInfo(operand.offset()), value);
298         
299         DelayedSetLocal delayed = DelayedSetLocal(operand, value);
300         
301         if (setMode == NormalSet) {
302             m_setLocalQueue.append(delayed);
303             return 0;
304         }
305         
306         return delayed.execute(this, setMode);
307     }
308     
309     void processSetLocalQueue()
310     {
311         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
312             m_setLocalQueue[i].execute(this);
313         m_setLocalQueue.resize(0);
314     }
315
316     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
317     {
318         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
319     }
320     
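    // Attach the lazy-operand value profile's prediction for the current bytecode index
    // and local to a freshly created GetLocal's VariableAccessData.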
321     Node* injectLazyOperandSpeculation(Node* node)
322     {
323         ASSERT(node->op() == GetLocal);
324         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
325         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
326         LazyOperandValueProfileKey key(m_currentIndex, node->local());
327         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
328         node->variableAccessData()->predict(prediction);
329         return node;
330     }
331
332     // Used in implementing get/set, above, where the operand is a local variable.
333     Node* getLocal(VirtualRegister operand)
334     {
335         unsigned local = operand.toLocal();
336
337         if (local < m_localWatchpoints.size()) {
338             if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
339                 if (JSValue value = set->inferredValue()) {
340                     addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
341                     addToGraph(VariableWatchpoint, OpInfo(set));
342                     return weakJSConstant(value);
343                 }
344             }
345         }
346
347         Node* node = m_currentBlock->variablesAtTail.local(local);
348         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
349         
350         // This has two goals: 1) link together variable access datas, and 2)
351         // try to avoid creating redundant GetLocals. (1) is required for
352         // correctness - no other phase will ensure that block-local variable
353         // access data unification is done correctly. (2) is purely opportunistic
354     // and is meant as a compile-time optimization only.
355         
356         VariableAccessData* variable;
357         
358         if (node) {
359             variable = node->variableAccessData();
360             variable->mergeIsCaptured(isCaptured);
361             
362             if (!isCaptured) {
363                 switch (node->op()) {
364                 case GetLocal:
365                     return node;
366                 case SetLocal:
367                     return node->child1().node();
368                 default:
369                     break;
370                 }
371             }
372         } else
373             variable = newVariableAccessData(operand, isCaptured);
374         
375         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
376         m_currentBlock->variablesAtTail.local(local) = node;
377         return node;
378     }
379
380     Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
381     {
382         unsigned local = operand.toLocal();
383         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
384         
385         if (setMode != ImmediateNakedSet) {
386             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
387             if (isCaptured || argumentPosition)
388                 flushDirect(operand, argumentPosition);
389         }
390
391         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
392         variableAccessData->mergeStructureCheckHoistingFailed(
393             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
394         variableAccessData->mergeCheckArrayHoistingFailed(
395             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
396         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
397         m_currentBlock->variablesAtTail.local(local) = node;
398         return node;
399     }
400
401     // Used in implementing get/set, above, where the operand is an argument.
402     Node* getArgument(VirtualRegister operand)
403     {
404         unsigned argument = operand.toArgument();
405         ASSERT(argument < m_numArguments);
406         
407         Node* node = m_currentBlock->variablesAtTail.argument(argument);
408         bool isCaptured = m_codeBlock->isCaptured(operand);
409
410         VariableAccessData* variable;
411         
412         if (node) {
413             variable = node->variableAccessData();
414             variable->mergeIsCaptured(isCaptured);
415             
416             switch (node->op()) {
417             case GetLocal:
418                 return node;
419             case SetLocal:
420                 return node->child1().node();
421             default:
422                 break;
423             }
424         } else
425             variable = newVariableAccessData(operand, isCaptured);
426         
427         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
428         m_currentBlock->variablesAtTail.argument(argument) = node;
429         return node;
430     }
431     Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
432     {
433         unsigned argument = operand.toArgument();
434         ASSERT(argument < m_numArguments);
435         
436         bool isCaptured = m_codeBlock->isCaptured(operand);
437
438         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
439
440         // Always flush arguments, except for 'this'. If 'this' is created by us,
441         // then make sure that it's never unboxed.
442         if (argument) {
443             if (setMode != ImmediateNakedSet)
444                 flushDirect(operand);
445         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
446             variableAccessData->mergeShouldNeverUnbox(true);
447         
448         variableAccessData->mergeStructureCheckHoistingFailed(
449             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
450         variableAccessData->mergeCheckArrayHoistingFailed(
451             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
452         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
453         m_currentBlock->variablesAtTail.argument(argument) = node;
454         return node;
455     }
456     
457     ArgumentPosition* findArgumentPositionForArgument(int argument)
458     {
459         InlineStackEntry* stack = m_inlineStackTop;
460         while (stack->m_inlineCallFrame)
461             stack = stack->m_caller;
462         return stack->m_argumentPositions[argument];
463     }
464     
465     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
466     {
467         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
468             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
469             if (!inlineCallFrame)
470                 break;
471             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
472                 continue;
473             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
474                 continue;
475             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
476                 continue;
477             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
478             return stack->m_argumentPositions[argument];
479         }
480         return 0;
481     }
482     
483     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
484     {
485         if (operand.isArgument())
486             return findArgumentPositionForArgument(operand.toArgument());
487         return findArgumentPositionForLocal(operand);
488     }
489
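    // Flushing emits a Flush node for an operand so that its stack slot is kept up to
    // date. This is used for captured variables and arguments, and at returns and other
    // terminals where the values must remain observable.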
490     void flush(VirtualRegister operand)
491     {
492         flushDirect(m_inlineStackTop->remapOperand(operand));
493     }
494     
495     void flushDirect(VirtualRegister operand)
496     {
497         flushDirect(operand, findArgumentPosition(operand));
498     }
499     
500     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
501     {
502         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
503         
504         ASSERT(!operand.isConstant());
505         
506         Node* node = m_currentBlock->variablesAtTail.operand(operand);
507         
508         VariableAccessData* variable;
509         
510         if (node) {
511             variable = node->variableAccessData();
512             variable->mergeIsCaptured(isCaptured);
513         } else
514             variable = newVariableAccessData(operand, isCaptured);
515         
516         node = addToGraph(Flush, OpInfo(variable));
517         m_currentBlock->variablesAtTail.operand(operand) = node;
518         if (argumentPosition)
519             argumentPosition->addVariable(variable);
520     }
521     
522     void flush(InlineStackEntry* inlineStackEntry)
523     {
524         int numArguments;
525         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
526             numArguments = inlineCallFrame->arguments.size();
527             if (inlineCallFrame->isClosureCall) {
528                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
529                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
530             }
531         } else
532             numArguments = inlineStackEntry->m_codeBlock->numParameters();
533         for (unsigned argument = numArguments; argument-- > 1;)
534             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
535         for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
536             if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
537                 continue;
538             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
539         }
540     }
541
542     void flushForTerminal()
543     {
544         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
545             flush(inlineStackEntry);
546     }
547
548     void flushForReturn()
549     {
550         flush(m_inlineStackTop);
551     }
552     
553     void flushIfTerminal(SwitchData& data)
554     {
555         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
556             return;
557         
558         for (unsigned i = data.cases.size(); i--;) {
559             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
560                 return;
561         }
562         
563         flushForTerminal();
564     }
565
566     // Assumes that the constant should be strongly marked.
567     Node* jsConstant(JSValue constantValue)
568     {
569         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
570     }
571
572     Node* weakJSConstant(JSValue constantValue)
573     {
574         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
575     }
576
577     // Helper functions to get/set the this value.
578     Node* getThis()
579     {
580         return get(m_inlineStackTop->m_codeBlock->thisRegister());
581     }
582
583     void setThis(Node* value)
584     {
585         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
586     }
587
588     InlineCallFrame* inlineCallFrame()
589     {
590         return m_inlineStackTop->m_inlineCallFrame;
591     }
592
593     CodeOrigin currentCodeOrigin()
594     {
595         return CodeOrigin(m_currentIndex, inlineCallFrame());
596     }
597     
598     BranchData* branchData(unsigned taken, unsigned notTaken)
599     {
600         // We assume that branches originating from bytecode always have a fall-through. We
601         // use this assumption to avoid checking for the creation of terminal blocks.
602         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
603         BranchData* data = m_graph.m_branchData.add();
604         *data = BranchData::withBytecodeIndices(taken, notTaken);
605         return data;
606     }
607     
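    // The addToGraph() overloads create a node at the current code origin, append it to
    // the current basic block, and return it.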
608     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
609     {
610         Node* result = m_graph.addNode(
611             SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
612             Edge(child3));
613         ASSERT(op != Phi);
614         m_currentBlock->append(result);
615         return result;
616     }
617     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
618     {
619         Node* result = m_graph.addNode(
620             SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
621         ASSERT(op != Phi);
622         m_currentBlock->append(result);
623         return result;
624     }
625     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
626     {
627         Node* result = m_graph.addNode(
628             SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
629             Edge(child3));
630         ASSERT(op != Phi);
631         m_currentBlock->append(result);
632         return result;
633     }
634     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
635     {
636         Node* result = m_graph.addNode(
637             SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
638             Edge(child1), Edge(child2), Edge(child3));
639         ASSERT(op != Phi);
640         m_currentBlock->append(result);
641         return result;
642     }
643     
644     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
645     {
646         Node* result = m_graph.addNode(
647             SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
648             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
649         ASSERT(op != Phi);
650         m_currentBlock->append(result);
651         
652         m_numPassedVarArgs = 0;
653         
654         return result;
655     }
656     
657     void removeLastNodeFromGraph(NodeType expectedNodeType)
658     {
659         Node* node = m_currentBlock->takeLast();
660         RELEASE_ASSERT(node->op() == expectedNodeType);
661         m_graph.m_allocator.free(node);
662     }
663
664     void addVarArgChild(Node* child)
665     {
666         m_graph.m_varArgChildren.append(Edge(child));
667         m_numPassedVarArgs++;
668     }
669     
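    // Builds the var-arg children for a call (the callee followed by its arguments),
    // grows m_parameterSlots to cover the outgoing call frame, and emits the call node
    // without storing its result; addCall() below also sets the result operand.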
670     Node* addCallWithoutSettingResult(
671         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
672         SpeculatedType prediction)
673     {
674         addVarArgChild(callee);
675         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
676         if (parameterSlots > m_parameterSlots)
677             m_parameterSlots = parameterSlots;
678
679         int dummyThisArgument = op == Call || op == NativeCall || op == ProfiledCall ? 0 : 1;
680         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
681             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
682
683         return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
684     }
685     
686     Node* addCall(
687         int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
688         SpeculatedType prediction)
689     {
690         Node* call = addCallWithoutSettingResult(
691             op, opInfo, callee, argCount, registerOffset, prediction);
692         VirtualRegister resultReg(result);
693         if (resultReg.isValid())
694             set(VirtualRegister(result), call);
695         return call;
696     }
697     
698     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
699     {
700         Node* objectNode = weakJSConstant(object);
701         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
702         return objectNode;
703     }
704     
705     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
706     {
707         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
708         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
709     }
710
711     SpeculatedType getPrediction(unsigned bytecodeIndex)
712     {
713         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
714         
715         if (prediction == SpecNone) {
716             // We have no information about what values this node generates. Give up
717             // on executing this code, since we're likely to do more damage than good.
718             addToGraph(ForceOSRExit);
719         }
720         
721         return prediction;
722     }
723     
724     SpeculatedType getPredictionWithoutOSRExit()
725     {
726         return getPredictionWithoutOSRExit(m_currentIndex);
727     }
728     
729     SpeculatedType getPrediction()
730     {
731         return getPrediction(m_currentIndex);
732     }
733     
734     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
735     {
736         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
737         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
738         return ArrayMode::fromObserved(locker, profile, action, false);
739     }
740     
741     ArrayMode getArrayMode(ArrayProfile* profile)
742     {
743         return getArrayMode(profile, Array::Read);
744     }
745     
746     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
747     {
748         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
749         
750         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
751         
752         bool makeSafe =
753             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
754             || profile->outOfBounds(locker);
755         
756         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
757         
758         return result;
759     }
760     
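    // Transfers overflow/negative-zero information from OSR exit profiles and baseline
    // slow-case counters onto arithmetic nodes, so that later phases know when the
    // cheap integer speculation is unlikely to hold.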
761     Node* makeSafe(Node* node)
762     {
763         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
764             node->mergeFlags(NodeMayOverflowInDFG);
765         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
766             node->mergeFlags(NodeMayNegZeroInDFG);
767         
768         if (!isX86() && node->op() == ArithMod)
769             return node;
770
771         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
772             return node;
773         
774         switch (node->op()) {
775         case UInt32ToNumber:
776         case ArithAdd:
777         case ArithSub:
778         case ValueAdd:
779         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
780             node->mergeFlags(NodeMayOverflowInBaseline);
781             break;
782             
783         case ArithNegate:
784             // Currently we can't tell the difference between a negation that overflowed
785             // (i.e. -(1 << 31)) and one that generated negative zero (i.e. -0). If it took
786             // the slow path then we assume that it did both of those things.
787             node->mergeFlags(NodeMayOverflowInBaseline);
788             node->mergeFlags(NodeMayNegZeroInBaseline);
789             break;
790
791         case ArithMul:
792             // FIXME: We should detect cases where we only overflowed but never created
793             // negative zero.
794             // https://bugs.webkit.org/show_bug.cgi?id=132470
795             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
796                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
797                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
798             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
799                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
800                 node->mergeFlags(NodeMayNegZeroInBaseline);
801             break;
802             
803         default:
804             RELEASE_ASSERT_NOT_REACHED();
805             break;
806         }
807         
808         return node;
809     }
810     
811     Node* makeDivSafe(Node* node)
812     {
813         ASSERT(node->op() == ArithDiv);
814         
815         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
816             node->mergeFlags(NodeMayOverflowInDFG);
817         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
818             node->mergeFlags(NodeMayNegZeroInDFG);
819         
820         // The main slow case counter for op_div in the old JIT counts only when
821         // the operands are not numbers. We don't care about that since we already
822         // have speculations in place that take care of that separately. We only
823         // care about when the outcome of the division is not an integer, which
824         // is what the special fast case counter tells us.
825         
826         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
827             return node;
828         
829         // FIXME: It might be possible to make this more granular.
830         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
831         
832         return node;
833     }
834     
835     void buildOperandMapsIfNecessary();
836     
837     VM* m_vm;
838     CodeBlock* m_codeBlock;
839     CodeBlock* m_profiledBlock;
840     Graph& m_graph;
841
842     // The current block being generated.
843     BasicBlock* m_currentBlock;
844     // The bytecode index of the current instruction being generated.
845     unsigned m_currentIndex;
846
847     FrozenValue* m_constantUndefined;
848     FrozenValue* m_constantNull;
849     FrozenValue* m_constantNaN;
850     FrozenValue* m_constantOne;
851     Vector<Node*, 16> m_constants;
852
853     // The number of arguments passed to the function.
854     unsigned m_numArguments;
855     // The number of locals (vars + temporaries) used in the function.
856     unsigned m_numLocals;
857     // The number of slots (in units of sizeof(Register)) that we need to
858     // preallocate for arguments to outgoing calls from this frame. This
859     // number includes the CallFrame slots that we initialize for the callee
860     // (but not the callee-initialized CallerFrame and ReturnPC slots).
861     // This number is 0 if and only if this function is a leaf.
862     unsigned m_parameterSlots;
863     // The number of var args passed to the next var arg node.
864     unsigned m_numPassedVarArgs;
865
866     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
867     
868     Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
869     
870     struct InlineStackEntry {
871         ByteCodeParser* m_byteCodeParser;
872         
873         CodeBlock* m_codeBlock;
874         CodeBlock* m_profiledBlock;
875         InlineCallFrame* m_inlineCallFrame;
876         
877         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
878         
879         QueryableExitProfile m_exitProfile;
880         
881         // Remapping of identifier and constant numbers from the code block being
882         // inlined (inline callee) to the code block that we're inlining into
883         // (the machine code block, which is the transitive, though not necessarily
884         // direct, caller).
885         Vector<unsigned> m_identifierRemap;
886         Vector<unsigned> m_constantBufferRemap;
887         Vector<unsigned> m_switchRemap;
888         
889         // Blocks introduced by this code block, which need successor linking.
890         // May include up to one basic block that includes the continuation after
891         // the callsite in the caller. These must be appended in the order that they
892         // are created, but their bytecodeBegin values need not be in order as they
893         // are ignored.
894         Vector<UnlinkedBlock> m_unlinkedBlocks;
895         
896         // Potential block linking targets. Must be sorted by bytecodeBegin, and
897         // cannot have two blocks that have the same bytecodeBegin.
898         Vector<BasicBlock*> m_blockLinkingTargets;
899         
900         // If the callsite's basic block was split into two, then this will be
901         // the head of the callsite block. It needs its successors linked to the
902         // m_unlinkedBlocks, but not the other way around: there's no way for
903         // any blocks in m_unlinkedBlocks to jump back into this block.
904         BasicBlock* m_callsiteBlockHead;
905         
906         // Does the callsite block head need linking? This is typically true
907         // but will be false for the machine code block's inline stack entry
908         // (since that one is not inlined) and for cases where an inline callee
909         // did the linking for us.
910         bool m_callsiteBlockHeadNeedsLinking;
911         
912         VirtualRegister m_returnValue;
913         
914         // Speculations about variable types collected from the profiled code block,
915     // which are based on OSR exit profiles that past DFG compilations of this
916         // code block had gathered.
917         LazyOperandValueProfileParser m_lazyOperands;
918         
919         CallLinkInfoMap m_callLinkInfos;
920         StubInfoMap m_stubInfos;
921         
922         // Did we see any returns? We need to handle the (uncommon but necessary)
923         // case where a procedure that does not return was inlined.
924         bool m_didReturn;
925         
926         // Did we have any early returns?
927         bool m_didEarlyReturn;
928         
929         // Pointers to the argument position trackers for this slice of code.
930         Vector<ArgumentPosition*> m_argumentPositions;
931         
932         InlineStackEntry* m_caller;
933         
934         InlineStackEntry(
935             ByteCodeParser*,
936             CodeBlock*,
937             CodeBlock* profiledBlock,
938             BasicBlock* callsiteBlockHead,
939             JSFunction* callee, // Null if this is a closure call.
940             VirtualRegister returnValueVR,
941             VirtualRegister inlineCallFrameStart,
942             int argumentCountIncludingThis,
943             InlineCallFrame::Kind);
944         
945         ~InlineStackEntry()
946         {
947             m_byteCodeParser->m_inlineStackTop = m_caller;
948         }
949         
950         VirtualRegister remapOperand(VirtualRegister operand) const
951         {
952             if (!m_inlineCallFrame)
953                 return operand;
954             
955             ASSERT(!operand.isConstant());
956
957             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
958         }
959     };
960     
961     InlineStackEntry* m_inlineStackTop;
962     
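    // A SetLocal whose emission has been deferred to the end of the current bytecode
    // instruction (see NormalSet above). execute() performs the actual setArgument()
    // or setLocal().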
963     struct DelayedSetLocal {
964         VirtualRegister m_operand;
965         Node* m_value;
966         
967         DelayedSetLocal() { }
968         DelayedSetLocal(VirtualRegister operand, Node* value)
969             : m_operand(operand)
970             , m_value(value)
971         {
972         }
973         
974         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
975         {
976             if (m_operand.isArgument())
977                 return parser->setArgument(m_operand, m_value, setMode);
978             return parser->setLocal(m_operand, m_value, setMode);
979         }
980     };
981     
982     Vector<DelayedSetLocal, 2> m_setLocalQueue;
983
984     // Have we built operand maps? We initialize them lazily, and only when doing
985     // inlining.
986     bool m_haveBuiltOperandMaps;
987     // Mapping between identifier names and numbers.
988     BorrowedIdentifierMap m_identifierMap;
989     
990     CodeBlock* m_dfgCodeBlock;
991     CallLinkStatus::ContextMap m_callContextMap;
992     StubInfoMap m_dfgStubInfos;
993     
994     Instruction* m_currentInstruction;
995 };
996
997 #define NEXT_OPCODE(name) \
998     m_currentIndex += OPCODE_LENGTH(name); \
999     continue
1000
1001 #define LAST_OPCODE(name) \
1002     m_currentIndex += OPCODE_LENGTH(name); \
1003     return shouldContinueParsing
1004
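// Decodes an op_call / op_construct instruction: pc[1] is the result operand, pc[2] the
// callee, pc[3] the argument count including 'this', and pc[4] the register offset
// (negated here), then forwards to the main handleCall() overload.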
1005 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1006 {
1007     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1008     handleCall(
1009         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1010         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
1011 }
1012
1013 void ByteCodeParser::handleCall(
1014     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1015     int callee, int argumentCountIncludingThis, int registerOffset)
1016 {
1017     Node* callTarget = get(VirtualRegister(callee));
1018     
1019     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1020         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1021         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1022     
1023     handleCall(
1024         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1025         argumentCountIncludingThis, registerOffset, callLinkStatus);
1026 }
1027     
1028 void ByteCodeParser::handleCall(
1029     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1030     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1031     CallLinkStatus callLinkStatus)
1032 {
1033     handleCall(
1034         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1035         registerOffset, callLinkStatus, getPrediction());
1036 }
1037
1038 void ByteCodeParser::handleCall(
1039     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1040     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1041     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1042 {
1043     ASSERT(registerOffset <= 0);
1044     
1045     if (callTarget->hasConstant())
1046         callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
1047     
1048     if ((!callLinkStatus.canOptimize() || callLinkStatus.size() != 1)
1049         && !isFTL(m_graph.m_plan.mode) && Options::useFTLJIT()
1050         && InlineCallFrame::isNormalCall(kind)
1051         && CallEdgeLog::isEnabled()
1052         && Options::dfgDoesCallEdgeProfiling()) {
1053         ASSERT(op == Call || op == Construct);
1054         if (op == Call)
1055             op = ProfiledCall;
1056         else
1057             op = ProfiledConstruct;
1058     }
1059     
1060     if (!callLinkStatus.canOptimize()) {
1061         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1062         // that we cannot optimize them.
1063         
1064         addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1065         return;
1066     }
1067     
1068     unsigned nextOffset = m_currentIndex + instructionSize;
1069     
1070     OpInfo callOpInfo;
1071     
1072     if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1073         if (m_graph.compilation())
1074             m_graph.compilation()->noticeInlinedCall();
1075         return;
1076     }
1077     
1078 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1079     if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1080         CallVariant callee = callLinkStatus[0].callee();
1081         JSFunction* function = callee.function();
1082         CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1083         if (function && function->isHostFunction()) {
1084             emitFunctionChecks(callee, callTarget, registerOffset, specializationKind);
1085             callOpInfo = OpInfo(m_graph.freeze(function));
1086
1087             if (op == Call || op == ProfiledCall)
1088                 op = NativeCall;
1089             else {
1090                 ASSERT(op == Construct || op == ProfiledConstruct);
1091                 op = NativeConstruct;
1092             }
1093         }
1094     }
1095 #endif
1096     
1097     addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1098 }
1099
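// Emits the speculation check that guards a devirtualized call: a CheckCell against
// either the callee cell itself or, for closure calls, the callee's executable
// (obtained via GetExecutable).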
1100 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
1101 {
1102     Node* thisArgument;
1103     if (kind == CodeForCall)
1104         thisArgument = get(virtualRegisterForArgument(0, registerOffset));
1105     else
1106         thisArgument = 0;
1107
1108     JSCell* calleeCell;
1109     Node* callTargetForCheck;
1110     if (callee.isClosureCall()) {
1111         calleeCell = callee.executable();
1112         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1113     } else {
1114         calleeCell = callee.nonExecutableCallee();
1115         callTargetForCheck = callTarget;
1116     }
1117     
1118     ASSERT(calleeCell);
1119     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1120 }
1121
1122 void ByteCodeParser::undoFunctionChecks(CallVariant callee)
1123 {
1124     removeLastNodeFromGraph(CheckCell);
1125     if (callee.isClosureCall())
1126         removeLastNodeFromGraph(GetExecutable);
1127 }
1128
1129 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
1130 {
1131     for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
1132         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1133 }
1134
1135 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1136 {
1137     if (verbose)
1138         dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1139     
1140     FunctionExecutable* executable = callee.functionExecutable();
1141     if (!executable) {
1142         if (verbose)
1143             dataLog("    Failing because there is no function executable.");
1144         return UINT_MAX;
1145     }
1146     
1147     // Does the number of arguments we're passing match the arity of the target? We currently
1148     // inline only if the number of arguments passed is greater than or equal to the number
1149     // of arguments expected.
1150     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1151         if (verbose)
1152             dataLog("    Failing because of arity mismatch.\n");
1153         return UINT_MAX;
1154     }
1155     
1156     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1157     // being an inline candidate? We might not have a code block if code was thrown away or if we
1158     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1159     // if we had a static proof of what was being called; this might happen for example if you call a
1160     // global function, where watchpointing gives us static information. Overall, it's a rare case
1161     // because we expect that any hot callees would have already been compiled.
1162     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1163     if (!codeBlock) {
1164         if (verbose)
1165             dataLog("    Failing because no code block available.\n");
1166         return UINT_MAX;
1167     }
1168     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1169         codeBlock, kind, callee.isClosureCall());
1170     if (!canInline(capabilityLevel)) {
1171         if (verbose)
1172             dataLog("    Failing because the function is not inlineable.\n");
1173         return UINT_MAX;
1174     }
1175     
1176     // Check if the caller is already too large. We do this check here because that's just
1177     // where we happen to also have the callee's code block, and we want that for the
1178     // purpose of unsetting SABI (the callee's m_shouldAlwaysBeInlined bit).
1179     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1180         codeBlock->m_shouldAlwaysBeInlined = false;
1181         if (verbose)
1182             dataLog("    Failing because the caller is too large.\n");
1183         return UINT_MAX;
1184     }
1185     
1186     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1187     // this function.
1188     // https://bugs.webkit.org/show_bug.cgi?id=127627
1189     
1190     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1191     // too many levels? If either of these is detected, then don't inline. We adjust our
1192     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1193     
1194     unsigned depth = 0;
1195     unsigned recursion = 0;
1196     
1197     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1198         ++depth;
1199         if (depth >= Options::maximumInliningDepth()) {
1200             if (verbose)
1201                 dataLog("    Failing because depth exceeded.\n");
1202             return UINT_MAX;
1203         }
1204         
1205         if (entry->executable() == executable) {
1206             ++recursion;
1207             if (recursion >= Options::maximumInliningRecursion()) {
1208                 if (verbose)
1209                     dataLog("    Failing because recursion detected.\n");
1210                 return UINT_MAX;
1211             }
1212         }
1213     }
1214     
1215     if (verbose)
1216         dataLog("    Inlining should be possible.\n");
1217     
1218     // It might be possible to inline.
1219     return codeBlock->instructionCount();
1220 }
1221
1222 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability)
1223 {
1224     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1225     
1226     ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1227     
1228     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1229
1230     // FIXME: Don't flush constants!
1231     
1232     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1233     
1234     ensureLocals(
1235         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1236         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1237     
1238     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1239
1240     VirtualRegister resultReg(resultOperand);
1241     if (resultReg.isValid())
1242         resultReg = m_inlineStackTop->remapOperand(resultReg);
1243     
1244     InlineStackEntry inlineStackEntry(
1245         this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1246         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1247     
1248     // This is where the actual inlining really happens.
1249     unsigned oldIndex = m_currentIndex;
1250     m_currentIndex = 0;
1251
1252     InlineVariableData inlineVariableData;
1253     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1254     inlineVariableData.argumentPositionStart = argumentPositionStart;
1255     inlineVariableData.calleeVariable = 0;
1256     
1257     RELEASE_ASSERT(
1258         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1259         == callee.isClosureCall());
1260     if (callee.isClosureCall()) {
1261         VariableAccessData* calleeVariable =
1262             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1263         VariableAccessData* scopeVariable =
1264             set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData();
1265         
1266         calleeVariable->mergeShouldNeverUnbox(true);
1267         scopeVariable->mergeShouldNeverUnbox(true);
1268         
1269         inlineVariableData.calleeVariable = calleeVariable;
1270     }
1271     
1272     m_graph.m_inlineVariableData.append(inlineVariableData);
1273     
1274     parseCodeBlock();
1275     clearCaches(); // Reset our state now that we're back to the outer code.
1276     
1277     m_currentIndex = oldIndex;
1278     
1279     // If the inlined code created some new basic blocks, then we have linking to do.
1280     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1281         
1282         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1283         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1284             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1285         else
1286             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1287         
1288         if (callerLinkability == CallerDoesNormalLinking)
1289             cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1290         
1291         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1292     } else
1293         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1294     
1295     BasicBlock* lastBlock = m_graph.lastBlock();
1296     // If there was a return, but no early returns, then we're done. We allow parsing of
1297     // the caller to continue in whatever basic block we're in right now.
1298     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1299         ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
1300         
1301         // If we created new blocks then the last block needs linking, but in the
1302         // caller. It doesn't need to be linked to, but it needs outgoing links.
1303         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1304             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1305             // for release builds because this block will never serve as a potential target
1306             // in the linker's binary search.
1307             lastBlock->bytecodeBegin = m_currentIndex;
1308             if (callerLinkability == CallerDoesNormalLinking) {
1309                 if (verbose)
1310                     dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1311                 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1312             }
1313         }
1314         
1315         m_currentBlock = m_graph.lastBlock();
1316         return;
1317     }
1318     
1319     // If we get to this point then all blocks must end in some sort of terminal.
1320     ASSERT(lastBlock->last()->isTerminal());
1321
1322     // Need to create a new basic block for the continuation at the caller.
1323     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1324
1325     // Link the early returns to the basic block we're about to create.
1326     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1327         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1328             continue;
1329         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1330         ASSERT(!blockToLink->isLinked);
1331         Node* node = blockToLink->last();
1332         ASSERT(node->op() == Jump);
1333         ASSERT(!node->targetBlock());
1334         node->targetBlock() = block.get();
1335         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1336         if (verbose)
1337             dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1338         blockToLink->didLink();
1339     }
1340     
1341     m_currentBlock = block.get();
1342     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1343     if (verbose)
1344         dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1345     if (callerLinkability == CallerDoesNormalLinking) {
1346         m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1347         m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1348     }
1349     m_graph.appendBlock(block);
1350     prepareToParseBlock();
1351 }
1352
1353 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1354 {
1355     // It's possible that the callsite block head is not owned by the caller.
1356     if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1357         // It's definitely owned by the caller, because the caller created new blocks.
1358         // Assert that this all adds up.
1359         ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1360         ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1361         inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1362     } else {
1363         // It's definitely not owned by the caller. Tell the caller that it does not
1364         // need to link its callsite block head, because we already did it here.
1365         ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1366         ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1367         inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1368     }
1369 }
1370
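     // Try to inline a single callee, paying for it out of 'inliningBalance'. Roughly: a handled
     // internal function or intrinsic costs 1, while a regular function costs inliningCost(...) and
     // is only inlined if that cost still fits in the remaining balance. Returns true if some form
     // of inlining (or intrinsic handling) happened.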
1371 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance)
1372 {
1373     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1374     
1375     if (!inliningBalance)
1376         return false;
1377     
1378     if (InternalFunction* function = callee.internalFunction()) {
1379         if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind)) {
1380             addToGraph(Phantom, callTargetNode);
1381             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1382             inliningBalance--;
1383             return true;
1384         }
1385         return false;
1386     }
1387     
1388     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1389     if (intrinsic != NoIntrinsic) {
1390         if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1391             addToGraph(Phantom, callTargetNode);
1392             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1393             inliningBalance--;
1394             return true;
1395         }
1396         return false;
1397     }
1398     
1399     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1400     if (myInliningCost > inliningBalance)
1401         return false;
1402     
1403     inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability);
1404     inliningBalance -= myInliningCost;
1405     return true;
1406 }
1407
1408 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1409 {
1410     if (verbose) {
1411         dataLog("Handling inlining...\n");
1412         dataLog("Stack: ", currentCodeOrigin(), "\n");
1413     }
1414     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1415     
1416     if (!callLinkStatus.size()) {
1417         if (verbose)
1418             dataLog("Bailing inlining.\n");
1419         return false;
1420     }
1421     
1422     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1423     if (specializationKind == CodeForConstruct)
1424         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1425     if (callLinkStatus.isClosureCall())
1426         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1427     
1428     // First check if we can avoid creating control flow. Our inliner does some CFG
1429     // simplification on the fly and this helps reduce compile times, but we can only leverage
1430     // this in cases where we don't need control flow diamonds to check the callee.
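         // For example, a call site that always saw the same callee and is not expected to take the
         // slow path just gets a function check followed by the inlined callee, with no Switch at all.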
1431     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1432         emitFunctionChecks(
1433             callLinkStatus[0].callee(), callTargetNode, registerOffset, specializationKind);
1434         bool result = attemptToInlineCall(
1435             callTargetNode, resultOperand, callLinkStatus[0].callee(), registerOffset,
1436             argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1437             inliningBalance);
1438         if (!result && !callLinkStatus.isProved())
1439             undoFunctionChecks(callLinkStatus[0].callee());
1440         if (verbose) {
1441             dataLog("Done inlining (simple).\n");
1442             dataLog("Stack: ", currentCodeOrigin(), "\n");
1443         }
1444         return result;
1445     }
1446     
1447     // We need to create some kind of switch over the callee. For now we only do this if we believe
1448     // that we're in the top tier. We have two reasons for this: first, it gives us an opportunity to
1449     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1450     // the DFG. By polyvariant profiling we mean polyvariant profiling of *this* call. Note that we
1451     // could improve on that by doing polymorphic inlining while still keeping the profiling, but we
1452     // currently opt against it, even though it could be interesting. That would require having a
1453     // separate node for call edge profiling.
1454     // FIXME: Introduce the notion of a separate call edge profiling node.
1455     // https://bugs.webkit.org/show_bug.cgi?id=136033
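         // Roughly: for a call site profiled with a handful of different callees, we emit a Switch on
         // the callee cell (or on its executable, for closure calls), with one inlined body per
         // profiled callee and a slow-path block on the fallthrough.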
1456     if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()) {
1457         if (verbose) {
1458             dataLog("Bailing inlining (hard).\n");
1459             dataLog("Stack: ", currentCodeOrigin(), "\n");
1460         }
1461         return false;
1462     }
1463     
1464     unsigned oldOffset = m_currentIndex;
1465     
1466     bool allAreClosureCalls = true;
1467     bool allAreDirectCalls = true;
1468     for (unsigned i = callLinkStatus.size(); i--;) {
1469         if (callLinkStatus[i].callee().isClosureCall())
1470             allAreDirectCalls = false;
1471         else
1472             allAreClosureCalls = false;
1473     }
1474     
1475     Node* thingToSwitchOn;
1476     if (allAreDirectCalls)
1477         thingToSwitchOn = callTargetNode;
1478     else if (allAreClosureCalls)
1479         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1480     else {
1481         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1482         // where it would be beneficial. Also, CallLinkStatus would make all callees appear like
1483         // closure calls if any calls were closure calls - except for calls to internal functions.
1484         // So this will only arise if some callees are internal functions and others are closures.
1485         // https://bugs.webkit.org/show_bug.cgi?id=136020
1486         if (verbose) {
1487             dataLog("Bailing inlining (mix).\n");
1488             dataLog("Stack: ", currentCodeOrigin(), "\n");
1489         }
1490         return false;
1491     }
1492     
1493     if (verbose) {
1494         dataLog("Doing hard inlining...\n");
1495         dataLog("Stack: ", currentCodeOrigin(), "\n");
1496     }
1497     
1498     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1499     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1500     // get away with doing an immediate-set here because we haven't performed any side effects
1501     // yet.
1502     if (verbose)
1503         dataLog("Register offset: ", registerOffset);
1504     VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1505     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1506     if (verbose)
1507         dataLog("Callee is going to be ", calleeReg, "\n");
1508     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1509     
1510     SwitchData& data = *m_graph.m_switchData.add();
1511     data.kind = SwitchCell;
1512     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1513     
1514     BasicBlock* originBlock = m_currentBlock;
1515     if (verbose)
1516         dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1517     originBlock->didLink();
1518     cancelLinkingForBlock(m_inlineStackTop, originBlock);
1519     
1520     // Each inlined callee will have a landing block that it returns to. They should all have jumps
1521     // to the continuation block, which we create last.
1522     Vector<BasicBlock*> landingBlocks;
1523     
1524     // We will force this to true if we give up on inlining any of the edges.
1525     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1526     
1527     if (verbose)
1528         dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1529     
1530     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1531         m_currentIndex = oldOffset;
1532         RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1533         m_currentBlock = block.get();
1534         m_graph.appendBlock(block);
1535         prepareToParseBlock();
1536         
1537         Node* myCallTargetNode = getDirect(calleeReg);
1538         
1539         bool inliningResult = attemptToInlineCall(
1540             myCallTargetNode, resultOperand, callLinkStatus[i].callee(), registerOffset,
1541             argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1542             inliningBalance);
1543         
1544         if (!inliningResult) {
1545             // That failed so we let the block die. Nothing interesting should have been added to
1546             // the block. We also give up on inlining any of the (less frequent) callees.
1547             ASSERT(m_currentBlock == block.get());
1548             ASSERT(m_graph.m_blocks.last() == block);
1549             m_graph.killBlockAndItsContents(block.get());
1550             m_graph.m_blocks.removeLast();
1551             
1552             // The fact that inlining failed means we need a slow path.
1553             couldTakeSlowPath = true;
1554             break;
1555         }
1556         
1557         JSCell* thingToCaseOn;
1558         if (allAreDirectCalls)
1559             thingToCaseOn = callLinkStatus[i].callee().nonExecutableCallee();
1560         else {
1561             ASSERT(allAreClosureCalls);
1562             thingToCaseOn = callLinkStatus[i].callee().executable();
1563         }
1564         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1565         m_currentIndex = nextOffset;
1566         processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1567         addToGraph(Jump);
1568         if (verbose)
1569             dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1570         m_currentBlock->didLink();
1571         landingBlocks.append(m_currentBlock);
1572
1573         if (verbose)
1574             dataLog("Finished inlining ", callLinkStatus[i].callee(), " at ", currentCodeOrigin(), ".\n");
1575     }
1576     
1577     RefPtr<BasicBlock> slowPathBlock = adoptRef(
1578         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1579     m_currentIndex = oldOffset;
1580     data.fallThrough = BranchTarget(slowPathBlock.get());
1581     m_graph.appendBlock(slowPathBlock);
1582     if (verbose)
1583         dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1584     slowPathBlock->didLink();
1585     prepareToParseBlock();
1586     m_currentBlock = slowPathBlock.get();
1587     Node* myCallTargetNode = getDirect(calleeReg);
1588     if (couldTakeSlowPath) {
1589         addCall(
1590             resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1591             registerOffset, prediction);
1592     } else {
1593         addToGraph(CheckBadCell);
1594         addToGraph(Phantom, myCallTargetNode);
1595         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1596         
1597         set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1598     }
1599
1600     m_currentIndex = nextOffset;
1601     processSetLocalQueue();
1602     addToGraph(Jump);
1603     landingBlocks.append(m_currentBlock);
1604     
1605     RefPtr<BasicBlock> continuationBlock = adoptRef(
1606         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1607     m_graph.appendBlock(continuationBlock);
1608     if (verbose)
1609         dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1610     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1611     prepareToParseBlock();
1612     m_currentBlock = continuationBlock.get();
1613     
1614     for (unsigned i = landingBlocks.size(); i--;)
1615         landingBlocks[i]->last()->targetBlock() = continuationBlock.get();
1616     
1617     m_currentIndex = oldOffset;
1618     
1619     if (verbose) {
1620         dataLog("Done inlining (hard).\n");
1621         dataLog("Stack: ", currentCodeOrigin(), "\n");
1622     }
1623     return true;
1624 }
1625
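     // Roughly: Math.min()/Math.max() with no arguments yields NaN, with one argument the value is
     // passed straight through (behind a Phantom that forces a number use), and with two arguments
     // we emit ArithMin/ArithMax. More arguments are not handled here.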
1626 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1627 {
1628     if (argumentCountIncludingThis == 1) { // Math.min()
1629         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1630         return true;
1631     }
1632      
1633     if (argumentCountIncludingThis == 2) { // Math.min(x)
1634         Node* result = get(virtualRegisterForArgument(1, registerOffset));
1635         addToGraph(Phantom, Edge(result, NumberUse));
1636         set(VirtualRegister(resultOperand), result);
1637         return true;
1638     }
1639     
1640     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1641         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1642         return true;
1643     }
1644     
1645     // Don't handle >=3 arguments for now.
1646     return false;
1647 }
1648
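     // Map a known built-in onto a dedicated DFG node instead of a call: for example Math.abs(x)
     // becomes ArithAbs, str.charCodeAt(i) becomes StringCharCodeAt, and Array.prototype.push
     // becomes ArrayPush. Returns false if the intrinsic cannot be handled here.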
1649 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1650 {
1651     switch (intrinsic) {
1652     case AbsIntrinsic: {
1653         if (argumentCountIncludingThis == 1) { // Math.abs()
1654             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1655             return true;
1656         }
1657
1658         if (!MacroAssembler::supportsFloatingPointAbs())
1659             return false;
1660
1661         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1662         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1663             node->mergeFlags(NodeMayOverflowInDFG);
1664         set(VirtualRegister(resultOperand), node);
1665         return true;
1666     }
1667
1668     case MinIntrinsic:
1669         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1670         
1671     case MaxIntrinsic:
1672         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1673         
1674     case SqrtIntrinsic:
1675     case CosIntrinsic:
1676     case SinIntrinsic: {
1677         if (argumentCountIncludingThis == 1) {
1678             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1679             return true;
1680         }
1681         
1682         switch (intrinsic) {
1683         case SqrtIntrinsic:
1684             if (!MacroAssembler::supportsFloatingPointSqrt())
1685                 return false;
1686             
1687             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1688             return true;
1689             
1690         case CosIntrinsic:
1691             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1692             return true;
1693             
1694         case SinIntrinsic:
1695             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1696             return true;
1697             
1698         default:
1699             RELEASE_ASSERT_NOT_REACHED();
1700             return false;
1701         }
1702     }
1703         
1704     case ArrayPushIntrinsic: {
1705         if (argumentCountIncludingThis != 2)
1706             return false;
1707         
1708         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1709         if (!arrayMode.isJSArray())
1710             return false;
1711         switch (arrayMode.type()) {
1712         case Array::Undecided:
1713         case Array::Int32:
1714         case Array::Double:
1715         case Array::Contiguous:
1716         case Array::ArrayStorage: {
1717             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1718             set(VirtualRegister(resultOperand), arrayPush);
1719             
1720             return true;
1721         }
1722             
1723         default:
1724             return false;
1725         }
1726     }
1727         
1728     case ArrayPopIntrinsic: {
1729         if (argumentCountIncludingThis != 1)
1730             return false;
1731         
1732         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1733         if (!arrayMode.isJSArray())
1734             return false;
1735         switch (arrayMode.type()) {
1736         case Array::Int32:
1737         case Array::Double:
1738         case Array::Contiguous:
1739         case Array::ArrayStorage: {
1740             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1741             set(VirtualRegister(resultOperand), arrayPop);
1742             return true;
1743         }
1744             
1745         default:
1746             return false;
1747         }
1748     }
1749
1750     case CharCodeAtIntrinsic: {
1751         if (argumentCountIncludingThis != 2)
1752             return false;
1753
1754         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1755         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1756         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1757
1758         set(VirtualRegister(resultOperand), charCode);
1759         return true;
1760     }
1761
1762     case CharAtIntrinsic: {
1763         if (argumentCountIncludingThis != 2)
1764             return false;
1765
1766         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1767         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1768         Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1769
1770         set(VirtualRegister(resultOperand), charCode);
1771         return true;
1772     }
1773     case FromCharCodeIntrinsic: {
1774         if (argumentCountIncludingThis != 2)
1775             return false;
1776
1777         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1778         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
1779
1780         set(VirtualRegister(resultOperand), charCode);
1781
1782         return true;
1783     }
1784
1785     case RegExpExecIntrinsic: {
1786         if (argumentCountIncludingThis != 2)
1787             return false;
1788         
1789         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1790         set(VirtualRegister(resultOperand), regExpExec);
1791         
1792         return true;
1793     }
1794         
1795     case RegExpTestIntrinsic: {
1796         if (argumentCountIncludingThis != 2)
1797             return false;
1798         
1799         Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1800         set(VirtualRegister(resultOperand), regExpExec);
1801         
1802         return true;
1803     }
1804
1805     case IMulIntrinsic: {
1806         if (argumentCountIncludingThis != 3)
1807             return false;
1808         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
1809         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
1810         Node* left = get(leftOperand);
1811         Node* right = get(rightOperand);
1812         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
1813         return true;
1814     }
1815         
1816     case FRoundIntrinsic: {
1817         if (argumentCountIncludingThis != 2)
1818             return false;
1819         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1820         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
1821         return true;
1822     }
1823         
1824     case DFGTrueIntrinsic: {
1825         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
1826         return true;
1827     }
1828         
1829     case OSRExitIntrinsic: {
1830         addToGraph(ForceOSRExit);
1831         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1832         return true;
1833     }
1834         
1835     case IsFinalTierIntrinsic: {
1836         set(VirtualRegister(resultOperand),
1837             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
1838         return true;
1839     }
1840         
1841     case SetInt32HeapPredictionIntrinsic: {
1842         for (int i = 1; i < argumentCountIncludingThis; ++i) {
1843             Node* node = get(virtualRegisterForArgument(i, registerOffset));
1844             if (node->hasHeapPrediction())
1845                 node->setHeapPrediction(SpecInt32);
1846         }
1847         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1848         return true;
1849     }
1850         
1851     case FiatInt52Intrinsic: {
1852         if (argumentCountIncludingThis != 2)
1853             return false;
1854         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1855         if (enableInt52())
1856             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
1857         else
1858             set(VirtualRegister(resultOperand), get(operand));
1859         return true;
1860     }
1861         
1862     default:
1863         return false;
1864     }
1865 }
1866
1867 bool ByteCodeParser::handleTypedArrayConstructor(
1868     int resultOperand, InternalFunction* function, int registerOffset,
1869     int argumentCountIncludingThis, TypedArrayType type)
1870 {
1871     if (!isTypedView(type))
1872         return false;
1873     
1874     if (function->classInfo() != constructorClassInfoForType(type))
1875         return false;
1876     
1877     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1878         return false;
1879     
1880     // We only have an intrinsic for the case where you say:
1881     //
1882     // new FooArray(blah);
1883     //
1884     // Of course, 'blah' could be any of the following:
1885     //
1886     // - Integer, indicating that you want to allocate an array of that length.
1887     //   This is the thing we're hoping for, and what we can actually do meaningful
1888     //   optimizations for.
1889     //
1890     // - Array buffer, indicating that you want to create a view onto that _entire_
1891     //   buffer.
1892     //
1893     // - Non-buffer object, indicating that you want to create a copy of that
1894     //   object by pretending that it quacks like an array.
1895     //
1896     // - Anything else, indicating that you want to have an exception thrown at
1897     //   you.
1898     //
1899     // The intrinsic, NewTypedArray, will behave as if it could do any of these
1900     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
1901     // predicted Int32, then we lock it in as a normal typed array allocation.
1902     // Otherwise, NewTypedArray turns into a totally opaque function call that
1903     // may clobber the world - by virtue of it accessing properties on what could
1904     // be an object.
1905     //
1906     // Note that although the generic form of NewTypedArray sounds sort of awful,
1907     // it is actually quite likely to be more efficient than a fully generic
1908     // Construct. So, we might want to think about making NewTypedArray variadic,
1909     // or else making Construct not super slow.
1910     
1911     if (argumentCountIncludingThis != 2)
1912         return false;
1913     
1914     set(VirtualRegister(resultOperand),
1915         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
1916     return true;
1917 }
1918
1919 bool ByteCodeParser::handleConstantInternalFunction(
1920     int resultOperand, InternalFunction* function, int registerOffset,
1921     int argumentCountIncludingThis, CodeSpecializationKind kind)
1922 {
1923     // If we ever find that we have a lot of internal functions that we specialize for,
1924     // then we should probably have some sort of hashtable dispatch, or maybe even
1925     // dispatch straight through the MethodTable of the InternalFunction. But for now,
1926     // it seems that this case is hit infrequently enough, and the number of functions
1927     // we know about is small enough, that having just a linear cascade of if statements
1928     // is good enough.
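         // For example, 'new Array(n)' becomes NewArrayWithSize, 'String(x)' becomes ToString (plus
         // NewStringObject when invoked as a constructor), and the typed array constructors are
         // handled via handleTypedArrayConstructor().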
1929     
1930     if (function->classInfo() == ArrayConstructor::info()) {
1931         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1932             return false;
1933         
1934         if (argumentCountIncludingThis == 2) {
1935             set(VirtualRegister(resultOperand),
1936                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
1937             return true;
1938         }
1939         
1940         for (int i = 1; i < argumentCountIncludingThis; ++i)
1941             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
1942         set(VirtualRegister(resultOperand),
1943             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
1944         return true;
1945     }
1946     
1947     if (function->classInfo() == StringConstructor::info()) {
1948         Node* result;
1949         
1950         if (argumentCountIncludingThis <= 1)
1951             result = jsConstant(m_vm->smallStrings.emptyString());
1952         else
1953             result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
1954         
1955         if (kind == CodeForConstruct)
1956             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
1957         
1958         set(VirtualRegister(resultOperand), result);
1959         return true;
1960     }
1961     
1962     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
1963         bool result = handleTypedArrayConstructor(
1964             resultOperand, function, registerOffset, argumentCountIncludingThis,
1965             indexToTypedArrayType(typeIndex));
1966         if (result)
1967             return true;
1968     }
1969     
1970     return false;
1971 }
1972
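     // Emit a property load at a known offset: if the base is a constant and the property's value is
     // known, we fold it to a constant; inline properties are read straight off the base; out-of-line
     // properties go through GetButterfly first.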
1973 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
1974 {
1975     if (base->hasConstant()) {
1976         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
1977             addToGraph(Phantom, base);
1978             return weakJSConstant(constant);
1979         }
1980     }
1981     
1982     Node* propertyStorage;
1983     if (isInlineOffset(offset))
1984         propertyStorage = base;
1985     else
1986         propertyStorage = addToGraph(GetButterfly, base);
1987     
1988     StorageAccessData* data = m_graph.m_storageAccessData.add();
1989     data->offset = offset;
1990     data->identifierNumber = identifierNumber;
1991     
1992     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
1993
1994     return getByOffset;
1995 }
1996
1997 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
1998 {
1999     Node* propertyStorage;
2000     if (isInlineOffset(offset))
2001         propertyStorage = base;
2002     else
2003         propertyStorage = addToGraph(GetButterfly, base);
2004     
2005     StorageAccessData* data = m_graph.m_storageAccessData.add();
2006     data->offset = offset;
2007     data->identifierNumber = identifier;
2008     
2009     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
2010     
2011     return result;
2012 }
2013
2014 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2015 {
2016     for (unsigned i = 0; i < vector.size(); ++i)
2017         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2018 }
2019
2020 void ByteCodeParser::handleGetById(
2021     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2022     const GetByIdStatus& getByIdStatus)
2023 {
2024     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2025     
2026     if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
2027         set(VirtualRegister(destinationOperand),
2028             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2029         return;
2030     }
2031     
2032     if (getByIdStatus.numVariants() > 1) {
2033         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2034             || !Options::enablePolymorphicAccessInlining()) {
2035             set(VirtualRegister(destinationOperand),
2036                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2037             return;
2038         }
2039         
2040         if (m_graph.compilation())
2041             m_graph.compilation()->noticeInlinedGetById();
2042     
2043         // 1) Emit prototype structure checks for all chains. This may not be optimal if there is
2044         //    some rarely executed case in the chain that requires a lot of checks and those
2045         //    checks are not watchpointable.
2046         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2047             emitChecks(getByIdStatus[variantIndex].constantChecks());
2048         
2049         // 2) Emit a MultiGetByOffset
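             //    For example, a load of 'o.x' where 'o' was profiled with two different structures,
             //    each holding 'x' at a different offset, becomes a single MultiGetByOffset node that
             //    covers all of the profiled variants.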
2050         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2051         data->variants = getByIdStatus.variants();
2052         data->identifierNumber = identifierNumber;
2053         set(VirtualRegister(destinationOperand),
2054             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2055         return;
2056     }
2057     
2058     ASSERT(getByIdStatus.numVariants() == 1);
2059     GetByIdVariant variant = getByIdStatus[0];
2060                 
2061     if (m_graph.compilation())
2062         m_graph.compilation()->noticeInlinedGetById();
2063     
2064     Node* originalBase = base;
2065                 
2066     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2067     
2068     emitChecks(variant.constantChecks());
2069
2070     if (variant.alternateBase())
2071         base = weakJSConstant(variant.alternateBase());
2072     
2073     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2074     // ensure that the base of the original get_by_id is kept alive until we're done with
2075     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2076     // on something other than the base following the CheckStructure on base.
2077     if (originalBase != base)
2078         addToGraph(Phantom, originalBase);
2079     
2080     Node* loadedValue = handleGetByOffset(
2081         variant.callLinkStatus() ? SpecCellOther : prediction,
2082         base, variant.baseStructure(), identifierNumber, variant.offset(),
2083         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2084     
2085     if (!variant.callLinkStatus()) {
2086         set(VirtualRegister(destinationOperand), loadedValue);
2087         return;
2088     }
2089     
2090     Node* getter = addToGraph(GetGetter, loadedValue);
2091     
2092     // Make a call. We don't try to get fancy with using the smallest operand number because
2093     // the stack layout phase should compress the stack anyway.
2094     
2095     unsigned numberOfParameters = 0;
2096     numberOfParameters++; // The 'this' argument.
2097     numberOfParameters++; // True return PC.
2098     
2099     // Start with a register offset that corresponds to the last in-use register.
2100     int registerOffset = virtualRegisterForLocal(
2101         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2102     registerOffset -= numberOfParameters;
2103     registerOffset -= JSStack::CallFrameHeaderSize;
2104     
2105     // Get the alignment right.
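         // For example, if stackAlignmentRegisters() is 2, a register offset of -13 becomes -14 so
         // that the callee frame stays properly aligned.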
2106     registerOffset = -WTF::roundUpToMultipleOf(
2107         stackAlignmentRegisters(),
2108         -registerOffset);
2109     
2110     ensureLocals(
2111         m_inlineStackTop->remapOperand(
2112             VirtualRegister(registerOffset)).toLocal());
2113     
2114     // Issue SetLocals. This has two effects:
2115     // 1) That's how handleCall() sees the arguments.
2116     // 2) If we inline then this ensures that the arguments are flushed so that if you use
2117     //    the dreaded arguments object on the getter, the right things happen. Strictly speaking we
2118     //    only care about 'this' in this case, but we're not going to take that
2119     //    shortcut.
2120     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2121     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2122     
2123     handleCall(
2124         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2125         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
2126 }
2127
2128 void ByteCodeParser::emitPutById(
2129     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2130 {
2131     if (isDirect)
2132         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2133     else
2134         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2135 }
2136
2137 void ByteCodeParser::handlePutById(
2138     Node* base, unsigned identifierNumber, Node* value,
2139     const PutByIdStatus& putByIdStatus, bool isDirect)
2140 {
2141     if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
2142         if (!putByIdStatus.isSet())
2143             addToGraph(ForceOSRExit);
2144         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2145         return;
2146     }
2147     
2148     if (putByIdStatus.numVariants() > 1) {
2149         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2150             || !Options::enablePolymorphicAccessInlining()) {
2151             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2152             return;
2153         }
2154         
2155         if (m_graph.compilation())
2156             m_graph.compilation()->noticeInlinedPutById();
2157         
2158         if (!isDirect) {
2159             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2160                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2161                     continue;
2162                 emitChecks(putByIdStatus[variantIndex].constantChecks());
2163             }
2164         }
2165         
2166         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2167         data->variants = putByIdStatus.variants();
2168         data->identifierNumber = identifierNumber;
2169         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2170         return;
2171     }
2172     
2173     ASSERT(putByIdStatus.numVariants() == 1);
2174     const PutByIdVariant& variant = putByIdStatus[0];
2175     
2176     switch (variant.kind()) {
2177     case PutByIdVariant::Replace: {
2178         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2179         handlePutByOffset(base, identifierNumber, variant.offset(), value);
2180         if (m_graph.compilation())
2181             m_graph.compilation()->noticeInlinedPutById();
2182         return;
2183     }
2184     
2185     case PutByIdVariant::Transition: {
2186         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2187         emitChecks(variant.constantChecks());
2188
2189         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2190     
2191         Node* propertyStorage;
2192         Transition* transition = m_graph.m_transitions.add(
2193             variant.oldStructureForTransition(), variant.newStructure());
2194
2195         if (variant.reallocatesStorage()) {
2196
2197             // If we're growing the property storage then it must be because we're
2198             // storing into the out-of-line storage.
2199             ASSERT(!isInlineOffset(variant.offset()));
2200
2201             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2202                 propertyStorage = addToGraph(
2203                     AllocatePropertyStorage, OpInfo(transition), base);
2204             } else {
2205                 propertyStorage = addToGraph(
2206                     ReallocatePropertyStorage, OpInfo(transition),
2207                     base, addToGraph(GetButterfly, base));
2208             }
2209         } else {
2210             if (isInlineOffset(variant.offset()))
2211                 propertyStorage = base;
2212             else
2213                 propertyStorage = addToGraph(GetButterfly, base);
2214         }
2215
2216         addToGraph(PutStructure, OpInfo(transition), base);
2217
2218         StorageAccessData* data = m_graph.m_storageAccessData.add();
2219         data->offset = variant.offset();
2220         data->identifierNumber = identifierNumber;
2221         
2222         addToGraph(
2223             PutByOffset,
2224             OpInfo(data),
2225             propertyStorage,
2226             base,
2227             value);
2228
2229         if (m_graph.compilation())
2230             m_graph.compilation()->noticeInlinedPutById();
2231         return;
2232     }
2233         
2234     case PutByIdVariant::Setter: {
2235         Node* originalBase = base;
2236         
2237         addToGraph(
2238             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2239         
2240         emitChecks(variant.constantChecks());
2241         
2242         if (variant.alternateBase())
2243             base = weakJSConstant(variant.alternateBase());
2244         
2245         Node* loadedValue = handleGetByOffset(
2246             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2247             GetGetterSetterByOffset);
2248         
2249         Node* setter = addToGraph(GetSetter, loadedValue);
2250         
2251         // Make a call. We don't try to get fancy with using the smallest operand number because
2252         // the stack layout phase should compress the stack anyway.
2253     
2254         unsigned numberOfParameters = 0;
2255         numberOfParameters++; // The 'this' argument.
2256         numberOfParameters++; // The new value.
2257         numberOfParameters++; // True return PC.
2258     
2259         // Start with a register offset that corresponds to the last in-use register.
2260         int registerOffset = virtualRegisterForLocal(
2261             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2262         registerOffset -= numberOfParameters;
2263         registerOffset -= JSStack::CallFrameHeaderSize;
2264     
2265         // Get the alignment right.
2266         registerOffset = -WTF::roundUpToMultipleOf(
2267             stackAlignmentRegisters(),
2268             -registerOffset);
2269     
2270         ensureLocals(
2271             m_inlineStackTop->remapOperand(
2272                 VirtualRegister(registerOffset)).toLocal());
2273     
2274         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2275         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2276         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2277     
2278         handleCall(
2279             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2280             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2281             *variant.callLinkStatus(), SpecOther);
2282         return;
2283     }
2284     
2285     default: {
2286         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2287         return;
2288     } }
2289 }
2290
2291 void ByteCodeParser::prepareToParseBlock()
2292 {
2293     clearCaches();
2294     ASSERT(m_setLocalQueue.isEmpty());
2295 }
2296
2297 void ByteCodeParser::clearCaches()
2298 {
2299     m_constants.resize(0);
2300 }
2301
2302 Node* ByteCodeParser::getScope(unsigned skipCount)
2303 {
2304     Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
2305     for (unsigned n = skipCount; n--;)
2306         localBase = addToGraph(SkipScope, localBase);
2307     return localBase;
2308 }
2309
2310 bool ByteCodeParser::parseBlock(unsigned limit)
2311 {
2312     bool shouldContinueParsing = true;
2313
2314     Interpreter* interpreter = m_vm->interpreter;
2315     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2316     unsigned blockBegin = m_currentIndex;
2317     
2318     // If we are the first basic block, introduce markers for arguments. This allows
2319     // us to track if a use of an argument may use the actual argument passed, as
2320     // opposed to using a value we set explicitly.
2321     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2322         m_graph.m_arguments.resize(m_numArguments);
2323         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2324             VariableAccessData* variable = newVariableAccessData(
2325                 virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
2326             variable->mergeStructureCheckHoistingFailed(
2327                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2328             variable->mergeCheckArrayHoistingFailed(
2329                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2330             
2331             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2332             m_graph.m_arguments[argument] = setArgument;
2333             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2334         }
2335     }
2336
2337     while (true) {
2338         processSetLocalQueue();
2339         
2340         // Don't extend over jump destinations.
2341         if (m_currentIndex == limit) {
2342             // Ordinarily we want to plant a jump. But refuse to do this if the block is
2343             // empty. This is a special case for inlining, which might otherwise create
2344             // some empty blocks. When parseBlock() returns with an empty
2345             // block, it will get repurposed instead of creating a new one. Note that this
2346             // logic relies on every bytecode resulting in one or more nodes, which would
2347             // be true anyway except for op_loop_hint, which emits a Phantom to force this
2348             // to be true.
2349             if (!m_currentBlock->isEmpty())
2350                 addToGraph(Jump, OpInfo(m_currentIndex));
2351             return shouldContinueParsing;
2352         }
2353         
2354         // Switch on the current bytecode opcode.
2355         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2356         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2357         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2358         
2359         if (Options::verboseDFGByteCodeParsing())
2360             dataLog("    parsing ", currentCodeOrigin(), "\n");
2361         
2362         if (m_graph.compilation()) {
2363             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2364                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2365         }
2366         
2367         switch (opcodeID) {
2368
2369         // === Function entry opcodes ===
2370
2371         case op_enter: {
2372             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2373             // Initialize all locals to undefined.
2374             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2375                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2376             if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
2377                 set(virtualRegisterForArgument(0), undefined, ImmediateNakedSet);
2378             NEXT_OPCODE(op_enter);
2379         }
2380             
2381         case op_touch_entry:
2382             if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
2383                 addToGraph(ForceOSRExit);
2384             NEXT_OPCODE(op_touch_entry);
2385             
2386         case op_to_this: {
2387             Node* op1 = getThis();
2388             if (op1->op() != ToThis) {
2389                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2390                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2391                     || !cachedStructure
2392                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2393                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2394                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2395                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2396                     setThis(addToGraph(ToThis, op1));
2397                 } else {
2398                     addToGraph(
2399                         CheckStructure,
2400                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2401                         op1);
2402                 }
2403             }
2404             NEXT_OPCODE(op_to_this);
2405         }
2406
2407         case op_create_this: {
2408             int calleeOperand = currentInstruction[2].u.operand;
2409             Node* callee = get(VirtualRegister(calleeOperand));
2410             bool alreadyEmitted = false;
2411             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2412                 if (Structure* structure = function->allocationStructure()) {
2413                     addToGraph(AllocationProfileWatchpoint, OpInfo(m_graph.freeze(function)));
2414                     // The callee is still live up to this point.
2415                     addToGraph(Phantom, callee);
2416                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2417                     alreadyEmitted = true;
2418                 }
2419             }
2420             if (!alreadyEmitted) {
2421                 set(VirtualRegister(currentInstruction[1].u.operand),
2422                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2423             }
2424             NEXT_OPCODE(op_create_this);
2425         }
2426
2427         case op_new_object: {
2428             set(VirtualRegister(currentInstruction[1].u.operand),
2429                 addToGraph(NewObject,
2430                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2431             NEXT_OPCODE(op_new_object);
2432         }
2433             
2434         case op_new_array: {
2435             int startOperand = currentInstruction[2].u.operand;
2436             int numOperands = currentInstruction[3].u.operand;
2437             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2438             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2439                 addVarArgChild(get(VirtualRegister(operandIdx)));
2440             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2441             NEXT_OPCODE(op_new_array);
2442         }
2443             
2444         case op_new_array_with_size: {
2445             int lengthOperand = currentInstruction[2].u.operand;
2446             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2447             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2448             NEXT_OPCODE(op_new_array_with_size);
2449         }
2450             
2451         case op_new_array_buffer: {
2452             int startConstant = currentInstruction[2].u.operand;
2453             int numConstants = currentInstruction[3].u.operand;
2454             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2455             NewArrayBufferData data;
2456             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2457             data.numConstants = numConstants;
2458             data.indexingType = profile->selectIndexingType();
2459
2460             // If this statement has never executed, we'll have the wrong indexing type in the profile.
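                 // For example, if the profile says Int32 but one of the constants is 2.5, the
                 // indexing type is widened to at least Double so the buffer can hold its own
                 // contents.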
2461             for (int i = 0; i < numConstants; ++i) {
2462                 data.indexingType =
2463                     leastUpperBoundOfIndexingTypeAndValue(
2464                         data.indexingType,
2465                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2466             }
2467             
2468             m_graph.m_newArrayBufferData.append(data);
2469             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2470             NEXT_OPCODE(op_new_array_buffer);
2471         }
2472             
2473         case op_new_regexp: {
2474             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2475             NEXT_OPCODE(op_new_regexp);
2476         }
2477             
2478         case op_get_callee: {
2479             JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
2480             if (!cachedFunction 
2481                 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2482                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
2483                 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
2484             } else {
2485                 FrozenValue* frozen = m_graph.freeze(cachedFunction);
2486                 ASSERT(cachedFunction->inherits(JSFunction::info()));
2487                 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
2488                 addToGraph(CheckCell, OpInfo(frozen), actualCallee);
2489                 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2490             }
2491             NEXT_OPCODE(op_get_callee);
2492         }
2493
2494         // === Bitwise operations ===
2495
2496         case op_bitand: {
2497             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2498             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2499             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2500             NEXT_OPCODE(op_bitand);
2501         }
2502
2503         case op_bitor: {
2504             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2505             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2506             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2507             NEXT_OPCODE(op_bitor);
2508         }
2509
2510         case op_bitxor: {
2511             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2512             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2513             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2514             NEXT_OPCODE(op_bitxor);
2515         }
2516
2517         case op_rshift: {
2518             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2519             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2520             set(VirtualRegister(currentInstruction[1].u.operand),
2521                 addToGraph(BitRShift, op1, op2));
2522             NEXT_OPCODE(op_rshift);
2523         }
2524
2525         case op_lshift: {
2526             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2527             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2528             set(VirtualRegister(currentInstruction[1].u.operand),
2529                 addToGraph(BitLShift, op1, op2));
2530             NEXT_OPCODE(op_lshift);
2531         }
2532
2533         case op_urshift: {
2534             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2535             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2536             set(VirtualRegister(currentInstruction[1].u.operand),
2537                 addToGraph(BitURShift, op1, op2));
2538             NEXT_OPCODE(op_urshift);
2539         }
2540             
2541         case op_unsigned: {
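                 // op_unsigned turns a value the bytecode treats as an unsigned 32-bit integer
                 // (e.g. the result of op_urshift) into a JS number. UInt32ToNumber may overflow
                 // the int32 range, and makeSafe() consults baseline profiling to decide how
                 // much overflow handling the node needs.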
2542             set(VirtualRegister(currentInstruction[1].u.operand),
2543                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2544             NEXT_OPCODE(op_unsigned);
2545         }
2546
2547         // === Increment/Decrement opcodes ===
2548
2549         case op_inc: {
2550             int srcDst = currentInstruction[1].u.operand;
2551             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2552             Node* op = get(srcDstVirtualRegister);
2553             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2554             NEXT_OPCODE(op_inc);
2555         }
2556
2557         case op_dec: {
2558             int srcDst = currentInstruction[1].u.operand;
2559             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2560             Node* op = get(srcDstVirtualRegister);
2561             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2562             NEXT_OPCODE(op_dec);
2563         }
2564
2565         // === Arithmetic operations ===
2566
2567         case op_add: {
2568             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2569             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
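                 // ValueAdd is the fully generic form (it also covers string concatenation);
                 // when both inputs are already known to produce numbers, ArithAdd suffices.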
2570             if (op1->hasNumberResult() && op2->hasNumberResult())
2571                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2572             else
2573                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2574             NEXT_OPCODE(op_add);
2575         }
2576
2577         case op_sub: {
2578             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2579             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2580             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2581             NEXT_OPCODE(op_sub);
2582         }
2583
2584         case op_negate: {
2585             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2586             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2587             NEXT_OPCODE(op_negate);
2588         }
2589
2590         case op_mul: {
2591             // Multiply requires that the inputs are not truncated, unfortunately.
2592             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2593             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2594             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2595             NEXT_OPCODE(op_mul);
2596         }
2597
2598         case op_mod: {
2599             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2600             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2601             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2602             NEXT_OPCODE(op_mod);
2603         }
2604
2605         case op_div: {
2606             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2607             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2608             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2609             NEXT_OPCODE(op_div);
2610         }
2611
2612         // === Misc operations ===
2613
2614         case op_debug:
2615             addToGraph(Breakpoint);
2616             NEXT_OPCODE(op_debug);
2617
2618         case op_profile_will_call: {
2619             addToGraph(ProfileWillCall);
2620             NEXT_OPCODE(op_profile_will_call);
2621         }
2622
2623         case op_profile_did_call: {
2624             addToGraph(ProfileDidCall);
2625             NEXT_OPCODE(op_profile_did_call);
2626         }
2627
2628         case op_mov: {
2629             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2630             set(VirtualRegister(currentInstruction[1].u.operand), op);
2631             NEXT_OPCODE(op_mov);
2632         }
2633
2634         case op_check_has_instance:
2635             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2636             NEXT_OPCODE(op_check_has_instance);
2637
2638         case op_instanceof: {
2639             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2640             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2641             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2642             NEXT_OPCODE(op_instanceof);
2643         }
2644             
2645         case op_is_undefined: {
2646             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2647             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2648             NEXT_OPCODE(op_is_undefined);
2649         }
2650
2651         case op_is_boolean: {
2652             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2653             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2654             NEXT_OPCODE(op_is_boolean);
2655         }
2656
2657         case op_is_number: {
2658             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2659             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2660             NEXT_OPCODE(op_is_number);
2661         }
2662
2663         case op_is_string: {
2664             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2665             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2666             NEXT_OPCODE(op_is_string);
2667         }
2668
2669         case op_is_object: {
2670             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2671             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2672             NEXT_OPCODE(op_is_object);
2673         }
2674
2675         case op_is_function: {
2676             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2677             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2678             NEXT_OPCODE(op_is_function);
2679         }
2680
2681         case op_not: {
2682             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2683             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2684             NEXT_OPCODE(op_not);
2685         }
2686             
2687         case op_to_primitive: {
2688             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2689             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2690             NEXT_OPCODE(op_to_primitive);
2691         }
2692             
2693         case op_strcat: {
2694             int startOperand = currentInstruction[2].u.operand;
2695             int numOperands = currentInstruction[3].u.operand;
2696 #if CPU(X86)
2697             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2698             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2699             const unsigned maxRopeArguments = 2;
2700 #else
2701             const unsigned maxRopeArguments = 3;
2702 #endif
2703             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2704             for (int i = 0; i < numOperands; i++)
2705                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2706
2707             for (int i = 0; i < numOperands; i++)
2708                 addToGraph(Phantom, toStringNodes[i]);
2709
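                 // Fold the strings into ropes left to right. operands[] acts as a small
                 // accumulator: once it holds maxRopeArguments children, collapse them into a
                 // single MakeRope node and carry that node forward as the new first operand.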
2710             Node* operands[AdjacencyList::Size];
2711             unsigned indexInOperands = 0;
2712             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2713                 operands[i] = 0;
2714             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2715                 if (indexInOperands == maxRopeArguments) {
2716                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2717                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2718                         operands[i] = 0;
2719                     indexInOperands = 1;
2720                 }
2721                 
2722                 ASSERT(indexInOperands < AdjacencyList::Size);
2723                 ASSERT(indexInOperands < maxRopeArguments);
2724                 operands[indexInOperands++] = toStringNodes[operandIdx];
2725             }
2726             set(VirtualRegister(currentInstruction[1].u.operand),
2727                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2728             NEXT_OPCODE(op_strcat);
2729         }
2730
2731         case op_less: {
2732             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2733             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2734             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2735             NEXT_OPCODE(op_less);
2736         }
2737
2738         case op_lesseq: {
2739             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2740             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2741             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
2742             NEXT_OPCODE(op_lesseq);
2743         }
2744
2745         case op_greater: {
2746             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2747             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2748             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
2749             NEXT_OPCODE(op_greater);
2750         }
2751
2752         case op_greatereq: {
2753             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2754             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2755             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
2756             NEXT_OPCODE(op_greatereq);
2757         }
2758
2759         case op_eq: {
2760             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2761             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2762             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
2763             NEXT_OPCODE(op_eq);
2764         }
2765
2766         case op_eq_null: {
2767             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2768             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
2769             NEXT_OPCODE(op_eq_null);
2770         }
2771
2772         case op_stricteq: {
2773             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2774             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2775             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
2776             NEXT_OPCODE(op_stricteq);
2777         }
2778
2779         case op_neq: {
2780             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2781             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2782             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2783             NEXT_OPCODE(op_neq);
2784         }
2785
2786         case op_neq_null: {
2787             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2788             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
2789             NEXT_OPCODE(op_neq_null);
2790         }
2791
2792         case op_nstricteq: {
2793             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2794             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2795             Node* invertedResult;
2796             invertedResult = addToGraph(CompareStrictEq, op1, op2);
2797             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
2798             NEXT_OPCODE(op_nstricteq);
2799         }
2800
2801         // === Property access operations ===
2802
2803         case op_get_by_val: {
2804             SpeculatedType prediction = getPredictionWithoutOSRExit();
2805             
2806             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2807             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
2808             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
2809             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2810             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
2811
2812             NEXT_OPCODE(op_get_by_val);
2813         }
2814
2815         case op_put_by_val_direct:
2816         case op_put_by_val: {
2817             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2818
2819             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
2820             
2821             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
2822             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2823             
2824             addVarArgChild(base);
2825             addVarArgChild(property);
2826             addVarArgChild(value);
2827             addVarArgChild(0); // Leave room for property storage.
2828             addVarArgChild(0); // Leave room for length.
2829             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2830
2831             NEXT_OPCODE(op_put_by_val);
2832         }
2833             
2834         case op_get_by_id:
2835         case op_get_by_id_out_of_line:
2836         case op_get_array_length: {
2837             SpeculatedType prediction = getPrediction();
2838             
2839             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2840             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2841             
2842             StringImpl* uid = m_graph.identifiers()[identifierNumber];
2843             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2844                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2845                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2846                 currentCodeOrigin(), uid);
2847             
2848             handleGetById(
2849                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2850
2851             NEXT_OPCODE(op_get_by_id);
2852         }
2853         case op_put_by_id:
2854         case op_put_by_id_out_of_line:
2855         case op_put_by_id_transition_direct:
2856         case op_put_by_id_transition_normal:
2857         case op_put_by_id_transition_direct_out_of_line:
2858         case op_put_by_id_transition_normal_out_of_line: {
2859             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2860             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2861             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2862             bool direct = currentInstruction[8].u.operand;
2863
2864             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2865                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2866                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2867                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
2868             
2869             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
2870             NEXT_OPCODE(op_put_by_id);
2871         }
2872
2873         case op_init_global_const_nop: {
2874             NEXT_OPCODE(op_init_global_const_nop);
2875         }
2876
2877         case op_init_global_const: {
2878             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2879             addToGraph(
2880                 PutGlobalVar,
2881                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2882                 value);
2883             NEXT_OPCODE(op_init_global_const);
2884         }
2885
2886         case op_profile_type: {
2887             Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
2888             addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
2889             NEXT_OPCODE(op_profile_type);
2890         }
2891
2892         // === Block terminators. ===
2893
2894         case op_jmp: {
2895             int relativeOffset = currentInstruction[1].u.operand;
2896             if (relativeOffset <= 0)
2897                 flushForTerminal();
2898             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2899             LAST_OPCODE(op_jmp);
2900         }
2901
2902         case op_jtrue: {
2903             unsigned relativeOffset = currentInstruction[2].u.operand;
2904             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2905             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
2906             LAST_OPCODE(op_jtrue);
2907         }
2908
2909         case op_jfalse: {
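                 // Identical to op_jtrue except that the successors are swapped: a Branch node's
                 // taken edge always corresponds to the condition being true, so here the jump
                 // target goes into the not-taken slot.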
2910             unsigned relativeOffset = currentInstruction[2].u.operand;
2911             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2912             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
2913             LAST_OPCODE(op_jfalse);
2914         }
2915
2916         case op_jeq_null: {
2917             unsigned relativeOffset = currentInstruction[2].u.operand;
2918             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2919             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2920             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
2921             LAST_OPCODE(op_jeq_null);
2922         }
2923
2924         case op_jneq_null: {
2925             unsigned relativeOffset = currentInstruction[2].u.operand;
2926             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2927             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2928             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
2929             LAST_OPCODE(op_jneq_null);
2930         }
2931
2932         case op_jless: {
2933             unsigned relativeOffset = currentInstruction[3].u.operand;
2934             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2935             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2936             Node* condition = addToGraph(CompareLess, op1, op2);
2937             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
2938             LAST_OPCODE(op_jless);
2939         }
2940
2941         case op_jlesseq: {
2942             unsigned relativeOffset = currentInstruction[3].u.operand;
2943             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2944             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2945             Node* condition = addToGraph(CompareLessEq, op1, op2);
2946             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
2947             LAST_OPCODE(op_jlesseq);
2948         }
2949
2950         case op_jgreater: {
2951             unsigned relativeOffset = currentInstruction[3].u.operand;
2952             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2953             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2954             Node* condition = addToGraph(CompareGreater, op1, op2);
2955             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
2956             LAST_OPCODE(op_jgreater);
2957         }
2958
2959         case op_jgreatereq: {
2960             unsigned relativeOffset = currentInstruction[3].u.operand;
2961             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2962             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2963             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2964             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
2965             LAST_OPCODE(op_jgreatereq);
2966         }
2967
2968         case op_jnless: {
2969             unsigned relativeOffset = currentInstruction[3].u.operand;
2970             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2971             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2972             Node* condition = addToGraph(CompareLess, op1, op2);
2973             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
2974             LAST_OPCODE(op_jnless);
2975         }
2976
2977         case op_jnlesseq: {
2978             unsigned relativeOffset = currentInstruction[3].u.operand;
2979             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2980             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2981             Node* condition = addToGraph(CompareLessEq, op1, op2);
2982             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
2983             LAST_OPCODE(op_jnlesseq);
2984         }
2985
2986         case op_jngreater: {
2987             unsigned relativeOffset = currentInstruction[3].u.operand;
2988             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2989             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2990             Node* condition = addToGraph(CompareGreater, op1, op2);
2991             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
2992             LAST_OPCODE(op_jngreater);
2993         }
2994
2995         case op_jngreatereq: {
2996             unsigned relativeOffset = currentInstruction[3].u.operand;
2997             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2998             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2999             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3000             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
3001             LAST_OPCODE(op_jngreatereq);
3002         }
3003             
3004         case op_switch_imm: {
3005             SwitchData& data = *m_graph.m_switchData.add();
3006             data.kind = SwitchImm;
3007             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3008             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3009             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
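                 // A branch offset of zero marks an unused slot in the jump table, and a case
                 // whose target equals the fall-through is redundant; both are skipped when
                 // building the SwitchData cases. The case value itself is table.min + i.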
3010             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3011                 if (!table.branchOffsets[i])
3012                     continue;
3013                 unsigned target = m_currentIndex + table.branchOffsets[i];
3014                 if (target == data.fallThrough.bytecodeIndex())
3015                     continue;
3016                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
3017             }
3018             flushIfTerminal(data);
3019             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3020             LAST_OPCODE(op_switch_imm);
3021         }
3022             
3023         case op_switch_char: {
3024             SwitchData& data = *m_graph.m_switchData.add();
3025             data.kind = SwitchChar;
3026             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3027             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3028             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3029             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3030                 if (!table.branchOffsets[i])
3031                     continue;
3032                 unsigned target = m_currentIndex + table.branchOffsets[i];
3033                 if (target == data.fallThrough.bytecodeIndex())
3034                     continue;
3035                 data.cases.append(
3036                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
3037             }
3038             flushIfTerminal(data);
3039             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3040             LAST_OPCODE(op_switch_char);
3041         }
3042
3043         case op_switch_string: {
3044             SwitchData& data = *m_graph.m_switchData.add();
3045             data.kind = SwitchString;
3046             data.switchTableIndex = currentInstruction[1].u.operand;
3047             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3048             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
3049             StringJumpTable::StringOffsetTable::iterator iter;
3050             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
3051             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
3052                 unsigned target = m_currentIndex + iter->value.branchOffset;
3053                 if (target == data.fallThrough.bytecodeIndex())
3054                     continue;
3055                 data.cases.append(
3056                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
3057             }
3058             flushIfTerminal(data);
3059             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3060             LAST_OPCODE(op_switch_string);
3061         }
3062
3063         case op_ret:
3064             flushForReturn();
3065             if (inlineCallFrame()) {
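                     // Returning from an inlined call: store the result into the caller's return
                     // value register and record how this block must be linked back into the
                     // caller, instead of emitting a real Return node.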
3066                 if (m_inlineStackTop->m_returnValue.isValid())
3067                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
3068                 m_inlineStackTop->m_didReturn = true;
3069                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
3070                     // If we're returning from the first block, then we're done parsing.
3071                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
3072                     shouldContinueParsing = false;
3073                     LAST_OPCODE(op_ret);
3074                 } else {
3075                     // If inlining created blocks, and we're doing a return, then we need some
3076                     // special linking.
3077                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3078                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
3079                 }
3080                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
3081                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
3082                     addToGraph(Jump, OpInfo(0));
3083                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
3084                     m_inlineStackTop->m_didEarlyReturn = true;
3085                 }
3086                 LAST_OPCODE(op_ret);
3087             }
3088             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3089             LAST_OPCODE(op_ret);
3090             
3091         case op_end:
3092             flushForReturn();
3093             ASSERT(!inlineCallFrame());
3094             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3095             LAST_OPCODE(op_end);
3096
3097         case op_throw:
3098             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
3099             flushForTerminal();
3100             addToGraph(Unreachable);
3101             LAST_OPCODE(op_throw);
3102             
3103         case op_throw_static_error:
3104             addToGraph(ThrowReferenceError);
3105             flushForTerminal();
3106             addToGraph(Unreachable);
3107             LAST_OPCODE(op_throw_static_error);
3108             
3109         case op_call:
3110             handleCall(currentInstruction, Call, CodeForCall);
3111             NEXT_OPCODE(op_call);
3112             
3113         case op_construct:
3114             handleCall(currentInstruction, Construct, CodeForConstruct);
3115             NEXT_OPCODE(op_construct);
3116             
3117         case op_call_varargs: {
3118             int result = currentInstruction[1].u.operand;
3119             int callee = currentInstruction[2].u.operand;
3120             int thisReg = currentInstruction[3].u.operand;
3121             int arguments = currentInstruction[4].u.operand;
3122             int firstFreeReg = currentInstruction[5].u.operand;
3123             
3124             ASSERT(inlineCallFrame());
3125             ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
3126             ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
3127
3128             addToGraph(CheckArgumentsNotCreated);
3129
3130             unsigned argCount = inlineCallFrame()->arguments.size();
3131             
3132             // Let's compute the register offset. We start with the last used register, and
3133             // then adjust for the things we want in the call frame.
3134             int registerOffset = firstFreeReg + 1;
3135             registerOffset -= argCount; // We will be passing some arguments.
3136             registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
3137             
3138             // Get the alignment right.
3139             registerOffset = -WTF::roundUpToMultipleOf(
3140                 stackAlignmentRegisters(),
3141                 -registerOffset);
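                 // Purely illustrative (made-up numbers): with firstFreeReg = -10, argCount = 3
                 // and a call frame header of 5 registers this gives -9 - 3 - 5 = -17; if
                 // stackAlignmentRegisters() is 2, the rounding step then moves it to -18.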
3142
3143             ensureLocals(
3144                 m_inlineStackTop->remapOperand(
3145                     VirtualRegister(registerOffset)).toLocal());
3146             
3147             // The bytecode wouldn't have set up the arguments. But we'll do it and make it
3148             // look like the bytecode had done it.
3149             int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
3150             set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
3151             for (unsigned argument = 1; argument < argCount; ++argument)
3152                 set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
3153             
3154             handleCall(
3155                 result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
3156                 callee, argCount, registerOffset);
3157             NEXT_OPCODE(op_call_varargs);
3158         }
3159             
3160         case op_jneq_ptr:
3161             // Statically speculate for now. It makes sense to let the speculate-only jneq_ptr
3162             // support simmer for a while before making it more general, since it's
3163             // already gnarly enough as it is.
3164             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
3165             addToGraph(
3166                 CheckCell,
3167                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
3168                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
3169                 get(VirtualRegister(currentInstruction[1].u.operand)));
3170             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
3171             LAST_OPCODE(op_jneq_ptr);
3172
3173         case op_resolve_scope: {
3174             int dst = currentInstruction[1].u.operand;
3175             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
3176             unsigned depth = currentInstruction[4].u.operand;
3177
3178             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
3179             if (needsVarInjectionChecks(resolveType))
3180                 addToGraph(VarInjectionWatchpoint);
3181
3182             switch (resolveType) {
3183             case GlobalProperty:
3184             case GlobalVar:
3185             case GlobalPropertyWithVarInjectionChecks:
3186             case GlobalVarWithVarInjectionChecks:
3187                 set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
3188                 break;
3189             case LocalClosureVar:
3190             case ClosureVar:
3191             case ClosureVarWithVarInjectionChecks: {
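                     // If we know the lexical environment and its "function entered once"
                     // watchpoint is still valid, the scope object is effectively unique: plant a
                     // FunctionReentryWatchpoint and use the environment as a constant instead of
                     // walking the scope chain via getScope().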
3192                 JSLexicalEnvironment* lexicalEnvironment = currentInstruction[5].u.lexicalEnvironment.get();
3193                 if (lexicalEnvironment
3194                     && lexicalEnvironment->symbolTable()->m_functionEnteredOnce.isStillValid()) {
3195                     addToGraph(FunctionReentryWatchpoint, OpInfo(lexicalEnvironment->symbolTable()));
3196                     set(VirtualRegister(dst), weakJSConstant(lexicalEnvironment));
3197                     break;
3198                 }
3199                 set(VirtualRegister(dst), getScope(depth));
3200                 break;
3201             }
3202             case Dynamic:
3203                 RELEASE_ASSERT_NOT_REACHED();
3204                 break;
3205             }
3206             NEXT_OPCODE(op_resolve_scope);
3207         }
3208
3209         case op_get_from_scope: {
3210             int dst = currentInstruction[1].u.operand;
3211             int scope = currentInstruction[2].u.operand;
3212             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3213             StringImpl* uid = m_graph.identifiers()[identifierNumber];
3214             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3215
3216             Structure* structure = 0;
3217             WatchpointSet* watchpoints = 0;
3218             uintptr_t operand;
3219             {
3220                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3221                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
3222                     watchpoints = currentInstruction[5].u.watchpointSet;
3223                 else
3224                     structure = currentInstruction[5].u.structure.get();
3225                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3226             }
3227
3228             UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it only to document that index 5 holds the watchpoint set in GlobalVar mode.
3229
3230             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3231
3232             switch (resolveType) {
3233             case GlobalProperty:
3234             case GlobalPropertyWithVarInjectionChecks: {
3235                 SpeculatedType prediction = getPrediction();
3236                 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
3237                 if (status.state() != GetByIdStatus::Simple
3238                     || status.numVariants() != 1
3239                     || status[0].structureSet().size() != 1) {
3240                     set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
3241                     break;
3242                 }
3243                 Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
3244                 addToGraph(Phantom, get(VirtualRegister(scope)));
3245                 set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
3246                 break;
3247             }
3248             case GlobalVar:
3249             case GlobalVarWithVarInjectionChecks: {
3250                 addToGraph(Phantom, get(VirtualRegister(scope)));
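                     // If the global variable's watchpoint set still carries an inferred value,
                     // fold the load to that constant and plant a VariableWatchpoint so this code
                     // gets invalidated if the variable later changes. Otherwise emit a plain
                     // GetGlobalVar.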
3251                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
3252                 VariableWatchpointSet* watchpointSet = entry.watchpointSet();
3253                 JSValue inferredValue =
3254                     watchpointSet ? watchpointSet->inferredValue() : JSValue();
3255                 if (!inferredValue) {
3256                     SpeculatedType prediction = getPrediction();
3257                     set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
3258                     break;
3259                 }
3260                 
3261                 addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
3262                 set(VirtualRegister(dst), weakJSConstant(inferredValue));
3263                 break;
3264             }
3265             case LocalClosureVar:
3266             case ClosureVar:
3267             case ClosureVarWithVarInjectionChecks: {
3268                 Node* scopeNode = get(VirtualRegister(scope));
3269                 if (JSLexicalEnvironment* lexicalEnvironment = m_graph.tryGetActivation(scopeNode)) {
3270                     SymbolTable* symbolTable = lexicalEnvironment->symbolTable();
3271                     ConcurrentJITLocker locker(symbolTable->m_lock);
3272                     SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
3273                     ASSERT(iter != symbolTable->end(locker));
3274                     VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
3275                     if (watchpointSet) {
3276                         if (JSValue value = watchpointSet->inferredValue()) {
3277                             addToGraph(Phantom, scopeNode);
3278                             addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
3279                             set(VirtualRegister(dst), weakJSConstant(value));
3280                             break;
3281                         }
3282                     }
3283                 }
3284                 SpeculatedType prediction = getPrediction();
3285                 set(VirtualRegister(dst),
3286                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), 
3287                         addToGraph(GetClosureRegisters, scopeNode)));
3288                 break;
3289             }
3290             case Dynamic:
3291                 RELEASE_ASSERT_NOT_REACHED();
3292                 break;
3293             }
3294             NEXT_OPCODE(op_get_from_scope);
3295         }
3296
3297         case op_put_to_scope: {
3298             unsigned scope = currentInstruction[1].u.operand;
3299             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3300             unsigned value = currentInstruction[3].u.operand;
3301             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3302             StringImpl* uid = m_graph.identifiers()[identifierNumber];
3303
3304             Structure* structure = 0;
3305             VariableWatchpointSet* watchpoints = 0;
3306             uintptr_t operand;
3307             {
3308                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3309                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
3310                     watchpoints = currentInstruction[5].u.watchpointSet;
3311                 else
3312                     structure = currentInstruction[5].u.structure.get();
3313                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3314             }
3315
3316             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3317
3318             switch (resolveType) {
3319             case GlobalProperty:
3320             case GlobalPropertyWithVarInjectionChecks: {
3321                 PutByIdStatus status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
3322                 if (status.numVariants() != 1
3323                     || status[0].kind() != PutByIdVariant::Replace
3324                     || status[0].structure().size() != 1) {
3325                     addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
3326                     break;
3327                 }
3328                 ASSERT(status[0].structure().onlyStructure() == structure);
3329                 Node* base = cellConstantWithStructureCheck(globalObject, structure);
3330                 addToGraph(Phantom, get(VirtualRegister(scope)));
3331                 handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
3332                 // Keep scope alive until after put.
3333                 addToGraph(Phantom, get(VirtualRegister(scope)));
3334                 break;
3335             }
3336             case GlobalVar:
3337             case GlobalVarWithVarInjectionChecks: {
3338                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
3339                 ASSERT(watchpoints == entry.watchpointSet());
3340                 Node* valueNode = get(VirtualRegister(value));
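                     // NotifyWrite lets the runtime check the stored value against the set's
                     // inferred value and fire the watchpoint when it no longer holds; it is only
                     // needed while the set has not already been invalidated.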
3341                 addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
3342                 if (watchpoints->state() != IsInvalidated)
3343                     addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
3344                 // Keep scope alive until after put.
3345                 addToGraph(Phantom, get(VirtualRegister(scope)));
3346                 break;
3347             }
3348             case LocalClosureVar:
3349             case ClosureVar:
3350             case ClosureVarWithVarInjectionChecks: {
3351                 Node* scopeNode = get(VirtualRegister(scope));
3352                 Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
3353                 Node* valueNode = get(VirtualRegister(value));
3354
3355                 if (watchpoints && watchpoints->state() != IsInvalidated)
3356                     addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
3357
3358                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, valueNode);
3359                 break;
3360             }
3361             case Dynamic:
3362                 RELEASE_ASSERT_NOT_REACHED();
3363                 break;
3364             }
3365             NEXT_OPCODE(op_put_to_scope);
3366         }
3367
3368         case op_loop_hint: {
3369             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
3370             // OSR can only happen at basic block boundaries. Assert that these two statements
3371             // are compatible.
3372             RELEASE_ASSERT(m_currentIndex == blockBegin);
3373             
3374             // We never do OSR into an inlined code block. That could not happen, since OSR
3375             // looks up the code block that is the replacement for the baseline JIT code
3376             // block. Hence, machine code block = true code block = not inline code block.
3377             if (!m_inlineStackTop->m_caller)
3378                 m_currentBlock->isOSRTarget = true;
3379
3380             addToGraph(LoopHint);
3381             
3382             if (m_vm->watchdog && m_vm->watchdog->isEnabled())
3383                 addToGraph(CheckWatchdogTimer);
3384             
3385             NEXT_OPCODE(op_loop_hint);
3386         }
3387             
3388         case op_init_lazy_reg: {
3389             set(VirtualRegister(currentInstruction[1].u.operand), jsConstant(JSValue()));
3390             ASSERT(operandIsLocal(currentInstruction[1].u.operand));
3391             m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
3392             NEXT_OPCODE(op_init_lazy_reg);
3393         }
3394             
3395         case op_create_lexical_environment: {
3396             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand))));
3397             NEXT_OPCODE(op_create_lexical_environment);
3398         }
3399             
3400         case op_create_arguments: {
3401             m_graph.m_hasArguments = true;
3402             Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
3403             set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
3404             set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
3405             NEXT_OPCODE(op_create_arguments);
3406         }
3407
3408         case op_tear_off_arguments: {
3409             m_graph.m_hasArguments = true;
3410             addToGraph(TearOffArguments, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand)));
3411             NEXT_OPCODE(op_tear_off_arguments);
3412         }
3413             
3414         case op_get_arguments_length: {
3415             m_graph.m_hasArguments = true;
3416             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
3417             NEXT_OPCODE(op_get_arguments_length);
3418         }
3419             
3420         case op_get_argument_by_val: {
3421             m_graph.m_hasArguments = true;
3422             set(VirtualRegister(currentInstruction[1].u.operand),
3423                 addToGraph(
3424                     GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
3425                     get(VirtualRegister(currentInstruction[3].u.operand))));
3426             NEXT_OPCODE(op_get_argument_by_val);
3427         }
3428             
3429         case op_new_func: {
3430             if (!currentInstruction[3].u.operand) {
3431                 set(VirtualRegister(currentInstruction[1].u.operand),
3432                     addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
3433             } else {
3434                 set(VirtualRegister(currentInstruction[1].u.operand),
3435                     addToGraph(
3436                         NewFunction,
3437                         OpInfo(currentInstruction[2].u.operand),
3438                         get(VirtualRegister(currentInstruction[1].u.operand))));
3439             }
3440             NEXT_OPCODE(op_new_func);
3441         }
3442             
3443         case op_new_captured_func: {
3444             Node* function = addToGraph(
3445                 NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand));
3446             if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet)
3447                 addToGraph(NotifyWrite, OpInfo(set), function);
3448             set(VirtualRegister(currentInstruction[1].u.operand), function);
3449             NEXT_OPCODE(op_new_captured_func);
3450         }
3451             
3452         case op_new_func_exp: {
3453             set(VirtualRegister(currentInstruction[1].u.operand),
3454                 addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
3455             NEXT_OPCODE(op_new_func_exp);
3456         }
3457
3458         case op_typeof: {
3459             set(VirtualRegister(currentInstruction[1].u.operand),
3460                 addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
3461             NEXT_OPCODE(op_typeof);
3462         }
3463
3464         case op_to_number: {
3465             Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
3466             addToGraph(Phantom, Edge(node, NumberUse));
3467             set(VirtualRegister(currentInstruction[1].u.operand), node);
3468             NEXT_OPCODE(op_to_number);
3469         }
3470             
3471         case op_in: {
3472             set(VirtualRegister(currentInstruction[1].u.operand),
3473                 addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
3474             NEXT_OPCODE(op_in);
3475         }
3476
3477         case op_get_enumerable_length: {
3478             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, 
3479                 get(VirtualRegister(currentInstruction[2].u.operand))));
3480             NEXT_OPCODE(op_get_enumerable_length);
3481         }
3482
3483         case op_has_generic_property: {
3484             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, 
3485                 get(VirtualRegister(currentInstruction[2].u.operand)),
3486                 get(VirtualRegister(currentInstruction[3].u.operand))));
3487             NEXT_OPCODE(op_has_generic_property);
3488         }
3489
3490         case op_has_structure_property: {
3491             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, 
3492                 get(VirtualRegister(currentInstruction[2].u.operand)),
3493                 get(VirtualRegister(currentInstruction[3].u.operand)),
3494                 get(VirtualRegister(currentInstruction[4].u.operand))));
3495             NEXT_OPCODE(op_has_structure_property);
3496         }
3497
3498         case op_has_indexed_property: {
3499             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3500             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
3501             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3502             Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
3503             set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
3504             NEXT_OPCODE(op_has_indexed_property);
3505         }
3506
3507         case op_get_direct_pname: {
3508             SpeculatedType prediction = getPredictionWithoutOSRExit();
3509             
3510             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3511             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3512             Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
3513             Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
3514
3515             addVarArgChild(base);
3516             addVarArgChild(property);
3517             addVarArgChild(index);
3518             addVarArgChild(enumerator);
3519             set(VirtualRegister(currentInstruction[1].u.operand), 
3520                 addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
3521
3522             NEXT_OPCODE(op_get_direct_pname);
3523         }
3524
3525         case op_get_structure_property_enumerator: {
3526             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetStructurePropertyEnumerator, 
3527                 get(VirtualRegister(currentInstruction[2].u.operand)),
3528                 get(VirtualRegister(currentInstruction[3].u.operand))));
3529             NEXT_OPCODE(op_get_structure_property_enumerator);
3530         }
3531
3532         case op_get_generic_property_enumerator: {
3533             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetGenericPropertyEnumerator, 
3534                 get(VirtualRegister(currentInstruction[2].u.operand)),
3535                 get(VirtualRegister(currentInstruction[3].u.operand)),
3536                 get(VirtualRegister(currentInstruction[4].u.operand))));
3537             NEXT_OPCODE(op_get_generic_property_enumerator);
3538         }
3539
3540         case op_next_enumerator_pname: {
3541             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorPname, 
3542                 get(VirtualRegister(currentInstruction[2].u.operand)),
3543                 get(VirtualRegister(currentInstruction[3].u.operand))));
3544             NEXT_OPCODE(op_next_enumerator_pname);
3545         }
3546
3547         case op_to_index_string: {
3548             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, 
3549                 get(VirtualRegister(currentInstruction[2].u.operand))));
3550             NEXT_OPCODE(op_to_index_string);
3551         }
3552
3553         default:
3554             // Parse failed! This should not happen because the capabilities checker
3555             // should have caught it.
3556             RELEASE_ASSERT_NOT_REACHED();
3557             return false;
3558         }
3559     }
3560 }
3561
3562 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
3563 {
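     // During parsing, terminal nodes record their successors as bytecode offsets. Now that all
     // blocks exist, translate those offsets into BasicBlock pointers for this block's terminal
     // (Jump, Branch or Switch); other terminals have nothing to link.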
3564     ASSERT(!block->isLinked);
3565     ASSERT(!block->isEmpty());
3566     Node* node = block->last();
3567     ASSERT(node->isTerminal());
3568     
3569     switch (node->op()) {
3570     case Jump:
3571         node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
3572         break;
3573         
3574     case Branch: {
3575         BranchData* data = node->branchData();
3576         data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
3577         data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
3578         break;
3579     }
3580         
3581     case Switch: {
3582         SwitchData* data = node->switchData();
3583         for (unsigned i = node->switchData()->cases.size(); i--;)
3584             data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
3585         data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
3586         break;
3587     }
3588         
3589     default:
3590         break;
3591     }
3592     
3593     if (verbose)
3594         dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
3595     block->didLink();
3596 }
3597
3598 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)