1  /*
2  * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "BasicBlockLocation.h"
33 #include "CallLinkStatus.h"
34 #include "CodeBlock.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGArrayMode.h"
37 #include "DFGCapabilities.h"
38 #include "DFGJITCode.h"
39 #include "GetByIdStatus.h"
40 #include "Heap.h"
41 #include "JSLexicalEnvironment.h"
42 #include "JSCInlines.h"
43 #include "PreciseJumpTargets.h"
44 #include "PutByIdStatus.h"
45 #include "StackAlignment.h"
46 #include "StringConstructor.h"
47 #include <wtf/CommaPrinter.h>
48 #include <wtf/HashMap.h>
49 #include <wtf/MathExtras.h>
50 #include <wtf/StdLibExtras.h>
51
52 namespace JSC { namespace DFG {
53
54 static const bool verbose = false;
55
56 class ConstantBufferKey {
57 public:
58     ConstantBufferKey()
59         : m_codeBlock(0)
60         , m_index(0)
61     {
62     }
63     
64     ConstantBufferKey(WTF::HashTableDeletedValueType)
65         : m_codeBlock(0)
66         , m_index(1)
67     {
68     }
69     
70     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
71         : m_codeBlock(codeBlock)
72         , m_index(index)
73     {
74     }
75     
76     bool operator==(const ConstantBufferKey& other) const
77     {
78         return m_codeBlock == other.m_codeBlock
79             && m_index == other.m_index;
80     }
81     
82     unsigned hash() const
83     {
84         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
85     }
86     
87     bool isHashTableDeletedValue() const
88     {
89         return !m_codeBlock && m_index;
90     }
91     
92     CodeBlock* codeBlock() const { return m_codeBlock; }
93     unsigned index() const { return m_index; }
94     
95 private:
96     CodeBlock* m_codeBlock;
97     unsigned m_index;
98 };
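// Note on hashing conventions (descriptive comment, not in the original source): the
// default-constructed key { nullptr, 0 } is used as the hash table's empty value (via
// SimpleClassHashTraits below), while the HashTableDeletedValue constructor yields
// { nullptr, 1 }; isHashTableDeletedValue() above tells the two apart by checking for a
// null code block with a non-zero index.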
99
100 struct ConstantBufferKeyHash {
101     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
102     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
103     {
104         return a == b;
105     }
106     
107     static const bool safeToCompareToEmptyOrDeleted = true;
108 };
109
110 } } // namespace JSC::DFG
111
112 namespace WTF {
113
114 template<typename T> struct DefaultHash;
115 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
116     typedef JSC::DFG::ConstantBufferKeyHash Hash;
117 };
118
119 template<typename T> struct HashTraits;
120 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
121
122 } // namespace WTF
123
124 namespace JSC { namespace DFG {
125
126 // === ByteCodeParser ===
127 //
128 // This class is used to compile the dataflow graph from a CodeBlock.
129 class ByteCodeParser {
130 public:
131     ByteCodeParser(Graph& graph)
132         : m_vm(&graph.m_vm)
133         , m_codeBlock(graph.m_codeBlock)
134         , m_profiledBlock(graph.m_profiledBlock)
135         , m_graph(graph)
136         , m_currentBlock(0)
137         , m_currentIndex(0)
138         , m_constantUndefined(graph.freeze(jsUndefined()))
139         , m_constantNull(graph.freeze(jsNull()))
140         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
141         , m_constantOne(graph.freeze(jsNumber(1)))
142         , m_numArguments(m_codeBlock->numParameters())
143         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
144         , m_parameterSlots(0)
145         , m_numPassedVarArgs(0)
146         , m_inlineStackTop(0)
147         , m_haveBuiltOperandMaps(false)
148         , m_currentInstruction(0)
149     {
150         ASSERT(m_profiledBlock);
151     }
152     
153     // Parse a full CodeBlock of bytecode.
154     bool parse();
155     
156 private:
157     struct InlineStackEntry;
158
159     // Just parse from m_currentIndex to the end of the current CodeBlock.
160     void parseCodeBlock();
161     
162     void ensureLocals(unsigned newNumLocals)
163     {
164         if (newNumLocals <= m_numLocals)
165             return;
166         m_numLocals = newNumLocals;
167         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
168             m_graph.block(i)->ensureLocals(newNumLocals);
169     }
170
171     // Helper for min and max.
172     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
173     
174     // Handle calls. This resolves issues surrounding inlining and intrinsics.
175     void handleCall(
176         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
177         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
178         SpeculatedType prediction);
179     void handleCall(
180         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
181         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
182     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
183     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
184     void emitFunctionChecks(CallVariant, Node* callTarget, int registerOffset, CodeSpecializationKind);
185     void undoFunctionChecks(CallVariant);
186     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
187     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
188     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
189     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
190     enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
191     bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance);
192     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability);
193     void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
194     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
195     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
196     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
197     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
198     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
199     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
200     void handleGetById(
201         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
202         const GetByIdStatus&);
203     void emitPutById(
204         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
205     void handlePutById(
206         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
207         bool isDirect);
208     void emitChecks(const ConstantStructureCheckVector&);
209
210     Node* getScope(VirtualRegister scopeChain, unsigned skipCount);
211     
212     void prepareToParseBlock();
213     void clearCaches();
214
215     // Parse a single basic block of bytecode instructions.
216     bool parseBlock(unsigned limit);
217     // Link block successors.
218     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
219     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
220     
221     VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
222     {
223         ASSERT(!operand.isConstant());
224         
225         m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
226         return &m_graph.m_variableAccessData.last();
227     }
228     
229     // Get/Set the operands/result of a bytecode instruction.
230     Node* getDirect(VirtualRegister operand)
231     {
232         ASSERT(!operand.isConstant());
233
234         // Is this an argument?
235         if (operand.isArgument())
236             return getArgument(operand);
237
238         // Must be a local.
239         return getLocal(operand);
240     }
241
242     Node* get(VirtualRegister operand)
243     {
244         if (operand.isConstant()) {
245             unsigned constantIndex = operand.toConstantIndex();
246             unsigned oldSize = m_constants.size();
247             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
248                 JSValue value = m_inlineStackTop->m_codeBlock->getConstant(operand.offset());
249                 if (constantIndex >= oldSize) {
250                     m_constants.grow(constantIndex + 1);
251                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
252                         m_constants[i] = nullptr;
253                 }
254                 m_constants[constantIndex] =
255                     addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
256             }
257             ASSERT(m_constants[constantIndex]);
258             return m_constants[constantIndex];
259         }
260         
261         if (inlineCallFrame()) {
262             if (!inlineCallFrame()->isClosureCall) {
263                 JSFunction* callee = inlineCallFrame()->calleeConstant();
264                 if (operand.offset() == JSStack::Callee)
265                     return weakJSConstant(callee);
266                 if (operand == m_inlineStackTop->m_codeBlock->scopeRegister())
267                     return weakJSConstant(callee->scope());
268             }
269         } else if (operand.offset() == JSStack::Callee)
270             return addToGraph(GetCallee);
271         
272         return getDirect(m_inlineStackTop->remapOperand(operand));
273     }
274     
275     enum SetMode {
276         // A normal set which follows a two-phase commit that spans code origins. During
277         // the current code origin it issues a MovHint, and at the start of the next
278         // code origin there will be a SetLocal. If the local needs flushing, the second
279         // SetLocal will be preceded with a Flush.
280         NormalSet,
281         
282         // A set where the SetLocal happens immediately and there is still a Flush. This
283         // is relevant when assigning to a local in tricky situations for the delayed
284         // SetLocal logic but where we know that we have not performed any side effects
285         // within this code origin. This is a safe replacement for NormalSet anytime we
286         // know that we have not yet performed side effects in this code origin.
287         ImmediateSetWithFlush,
288         
289         // A set where the SetLocal happens immediately and we do not Flush it even if
290         // this is a local that is marked as needing it. This is relevant when
291         // initializing locals at the top of a function.
292         ImmediateNakedSet
293     };
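    // A rough sketch of how these modes play out below: setDirect() always emits a MovHint
    // for the operand right away; under NormalSet it then queues a DelayedSetLocal, which
    // processSetLocalQueue() executes later (the SetLocal that the NormalSet comment above
    // promises for the next code origin), while the Immediate* modes execute the SetLocal
    // on the spot via DelayedSetLocal::execute().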
294     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
295     {
296         addToGraph(MovHint, OpInfo(operand.offset()), value);
297         
298         DelayedSetLocal delayed = DelayedSetLocal(operand, value);
299         
300         if (setMode == NormalSet) {
301             m_setLocalQueue.append(delayed);
302             return 0;
303         }
304         
305         return delayed.execute(this, setMode);
306     }
307     
308     void processSetLocalQueue()
309     {
310         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
311             m_setLocalQueue[i].execute(this);
312         m_setLocalQueue.resize(0);
313     }
314
315     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
316     {
317         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
318     }
319     
320     Node* injectLazyOperandSpeculation(Node* node)
321     {
322         ASSERT(node->op() == GetLocal);
323         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
324         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
325         LazyOperandValueProfileKey key(m_currentIndex, node->local());
326         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
327         node->variableAccessData()->predict(prediction);
328         return node;
329     }
330
331     // Used in implementing get/set, above, where the operand is a local variable.
332     Node* getLocal(VirtualRegister operand)
333     {
334         unsigned local = operand.toLocal();
335
336         if (local < m_localWatchpoints.size()) {
337             if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
338                 if (JSValue value = set->inferredValue()) {
339                     addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
340                     addToGraph(VariableWatchpoint, OpInfo(set));
341                     return weakJSConstant(value);
342                 }
343             }
344         }
345
346         Node* node = m_currentBlock->variablesAtTail.local(local);
347         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
348         
349         // This has two goals: 1) link together variable access datas, and 2)
350         // try to avoid creating redundant GetLocals. (1) is required for
351         // correctness - no other phase will ensure that block-local variable
352         // access data unification is done correctly. (2) is purely opportunistic
353         // and is meant as a compile-time optimization only.
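        // For example, asking for an uncaptured local that a SetLocal in this block has just
        // written simply reuses that SetLocal's value (see the switch on node->op() below)
        // instead of emitting a redundant GetLocal.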
354         
355         VariableAccessData* variable;
356         
357         if (node) {
358             variable = node->variableAccessData();
359             variable->mergeIsCaptured(isCaptured);
360             
361             if (!isCaptured) {
362                 switch (node->op()) {
363                 case GetLocal:
364                     return node;
365                 case SetLocal:
366                     return node->child1().node();
367                 default:
368                     break;
369                 }
370             }
371         } else
372             variable = newVariableAccessData(operand, isCaptured);
373         
374         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
375         m_currentBlock->variablesAtTail.local(local) = node;
376         return node;
377     }
378
379     Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
380     {
381         unsigned local = operand.toLocal();
382         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
383         
384         if (setMode != ImmediateNakedSet) {
385             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
386             if (isCaptured || argumentPosition)
387                 flushDirect(operand, argumentPosition);
388         }
389
390         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
391         variableAccessData->mergeStructureCheckHoistingFailed(
392             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
393         variableAccessData->mergeCheckArrayHoistingFailed(
394             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
395         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
396         m_currentBlock->variablesAtTail.local(local) = node;
397         return node;
398     }
399
400     // Used in implementing get/set, above, where the operand is an argument.
401     Node* getArgument(VirtualRegister operand)
402     {
403         unsigned argument = operand.toArgument();
404         ASSERT(argument < m_numArguments);
405         
406         Node* node = m_currentBlock->variablesAtTail.argument(argument);
407         bool isCaptured = m_codeBlock->isCaptured(operand);
408
409         VariableAccessData* variable;
410         
411         if (node) {
412             variable = node->variableAccessData();
413             variable->mergeIsCaptured(isCaptured);
414             
415             if (!isCaptured) {
416                 switch (node->op()) {
417                 case GetLocal:
418                     return node;
419                 case SetLocal:
420                     return node->child1().node();
421                 default:
422                     break;
423                 }
424             }
425         } else
426             variable = newVariableAccessData(operand, isCaptured);
427         
428         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
429         m_currentBlock->variablesAtTail.argument(argument) = node;
430         return node;
431     }
432     Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
433     {
434         unsigned argument = operand.toArgument();
435         ASSERT(argument < m_numArguments);
436         
437         bool isCaptured = m_codeBlock->isCaptured(operand);
438
439         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
440
441         // Always flush arguments, except for 'this'. If 'this' is created by us,
442         // then make sure that it's never unboxed.
443         if (argument) {
444             if (setMode != ImmediateNakedSet)
445                 flushDirect(operand);
446         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
447             variableAccessData->mergeShouldNeverUnbox(true);
448         
449         variableAccessData->mergeStructureCheckHoistingFailed(
450             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
451         variableAccessData->mergeCheckArrayHoistingFailed(
452             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
453         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
454         m_currentBlock->variablesAtTail.argument(argument) = node;
455         return node;
456     }
457     
458     ArgumentPosition* findArgumentPositionForArgument(int argument)
459     {
460         InlineStackEntry* stack = m_inlineStackTop;
461         while (stack->m_inlineCallFrame)
462             stack = stack->m_caller;
463         return stack->m_argumentPositions[argument];
464     }
465     
466     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
467     {
468         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
469             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
470             if (!inlineCallFrame)
471                 break;
472             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
473                 continue;
474             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
475                 continue;
476             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
477                 continue;
478             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
479             return stack->m_argumentPositions[argument];
480         }
481         return 0;
482     }
483     
484     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
485     {
486         if (operand.isArgument())
487             return findArgumentPositionForArgument(operand.toArgument());
488         return findArgumentPositionForLocal(operand);
489     }
490
491     void flush(VirtualRegister operand)
492     {
493         flushDirect(m_inlineStackTop->remapOperand(operand));
494     }
495     
496     void flushDirect(VirtualRegister operand)
497     {
498         flushDirect(operand, findArgumentPosition(operand));
499     }
500     
501     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
502     {
503         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
504         
505         ASSERT(!operand.isConstant());
506         
507         Node* node = m_currentBlock->variablesAtTail.operand(operand);
508         
509         VariableAccessData* variable;
510         
511         if (node) {
512             variable = node->variableAccessData();
513             variable->mergeIsCaptured(isCaptured);
514         } else
515             variable = newVariableAccessData(operand, isCaptured);
516         
517         node = addToGraph(Flush, OpInfo(variable));
518         m_currentBlock->variablesAtTail.operand(operand) = node;
519         if (argumentPosition)
520             argumentPosition->addVariable(variable);
521     }
522     
523     void flush(InlineStackEntry* inlineStackEntry)
524     {
525         int numArguments;
526         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
527             numArguments = inlineCallFrame->arguments.size();
528             if (inlineCallFrame->isClosureCall)
529                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
530         } else
531             numArguments = inlineStackEntry->m_codeBlock->numParameters();
532         for (unsigned argument = numArguments; argument-- > 1;)
533             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
534         for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
535             if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
536                 continue;
537             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
538         }
539     }
540
541     void flushForTerminal()
542     {
543         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
544             flush(inlineStackEntry);
545     }
546
547     void flushForReturn()
548     {
549         flush(m_inlineStackTop);
550     }
551     
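    // The helper below flushes for a switch only when the switch is effectively a terminal:
    // if every target, including the fall-through, jumps backwards (to bytecode at or before
    // m_currentIndex), control never continues in this block past the switch.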
552     void flushIfTerminal(SwitchData& data)
553     {
554         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
555             return;
556         
557         for (unsigned i = data.cases.size(); i--;) {
558             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
559                 return;
560         }
561         
562         flushForTerminal();
563     }
564
565     // Assumes that the constant should be strongly marked.
566     Node* jsConstant(JSValue constantValue)
567     {
568         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
569     }
570
571     Node* weakJSConstant(JSValue constantValue)
572     {
573         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
574     }
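    // Presumed distinction between the two constant helpers above: jsConstant() uses
    // freezeStrong(), which should keep the value strongly referenced for the lifetime of the
    // compilation, whereas weakJSConstant() uses freeze(), which is expected to register only
    // a weak reference (hence the name).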
575
576     // Helper functions to get/set the this value.
577     Node* getThis()
578     {
579         return get(m_inlineStackTop->m_codeBlock->thisRegister());
580     }
581
582     void setThis(Node* value)
583     {
584         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
585     }
586
587     InlineCallFrame* inlineCallFrame()
588     {
589         return m_inlineStackTop->m_inlineCallFrame;
590     }
591
592     CodeOrigin currentCodeOrigin()
593     {
594         return CodeOrigin(m_currentIndex, inlineCallFrame());
595     }
596     
597     BranchData* branchData(unsigned taken, unsigned notTaken)
598     {
599         // We assume that branches originating from bytecode always have a fall-through. We
600         // use this assumption to avoid checking for the creation of terminal blocks.
601         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
602         BranchData* data = m_graph.m_branchData.add();
603         *data = BranchData::withBytecodeIndices(taken, notTaken);
604         return data;
605     }
606     
607     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
608     {
609         Node* result = m_graph.addNode(
610             SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
611             Edge(child3));
612         ASSERT(op != Phi);
613         m_currentBlock->append(result);
614         return result;
615     }
616     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
617     {
618         Node* result = m_graph.addNode(
619             SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
620         ASSERT(op != Phi);
621         m_currentBlock->append(result);
622         return result;
623     }
624     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
625     {
626         Node* result = m_graph.addNode(
627             SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
628             Edge(child3));
629         ASSERT(op != Phi);
630         m_currentBlock->append(result);
631         return result;
632     }
633     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
634     {
635         Node* result = m_graph.addNode(
636             SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
637             Edge(child1), Edge(child2), Edge(child3));
638         ASSERT(op != Phi);
639         m_currentBlock->append(result);
640         return result;
641     }
642     
643     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
644     {
645         Node* result = m_graph.addNode(
646             SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
647             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
648         ASSERT(op != Phi);
649         m_currentBlock->append(result);
650         
651         m_numPassedVarArgs = 0;
652         
653         return result;
654     }
655     
656     void removeLastNodeFromGraph(NodeType expectedNodeType)
657     {
658         Node* node = m_currentBlock->takeLast();
659         RELEASE_ASSERT(node->op() == expectedNodeType);
660         m_graph.m_allocator.free(node);
661     }
662
663     void addVarArgChild(Node* child)
664     {
665         m_graph.m_varArgChildren.append(Edge(child));
666         m_numPassedVarArgs++;
667     }
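    // How the var-arg plumbing above fits together: addVarArgChild() appends an Edge to
    // m_graph.m_varArgChildren and bumps m_numPassedVarArgs; the Node::VarArg overload of
    // addToGraph() then hands the new node the starting index and count of those children
    // and resets the counter for the next var-arg node.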
668     
669     Node* addCallWithoutSettingResult(
670         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
671         SpeculatedType prediction)
672     {
673         addVarArgChild(callee);
674         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
675         if (parameterSlots > m_parameterSlots)
676             m_parameterSlots = parameterSlots;
677
678         int dummyThisArgument = op == Call || op == NativeCall || op == ProfiledCall ? 0 : 1;
679         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
680             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
681
682         return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
683     }
684     
685     Node* addCall(
686         int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
687         SpeculatedType prediction)
688     {
689         Node* call = addCallWithoutSettingResult(
690             op, opInfo, callee, argCount, registerOffset, prediction);
691         VirtualRegister resultReg(result);
692         if (resultReg.isValid())
693             set(VirtualRegister(result), call);
694         return call;
695     }
696     
697     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
698     {
699         Node* objectNode = weakJSConstant(object);
700         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
701         return objectNode;
702     }
703     
704     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
705     {
706         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
707         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
708     }
709
710     SpeculatedType getPrediction(unsigned bytecodeIndex)
711     {
712         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
713         
714         if (prediction == SpecNone) {
715             // We have no information about what values this node generates. Give up
716             // on executing this code, since we're likely to do more damage than good.
717             addToGraph(ForceOSRExit);
718         }
719         
720         return prediction;
721     }
722     
723     SpeculatedType getPredictionWithoutOSRExit()
724     {
725         return getPredictionWithoutOSRExit(m_currentIndex);
726     }
727     
728     SpeculatedType getPrediction()
729     {
730         return getPrediction(m_currentIndex);
731     }
732     
733     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
734     {
735         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
736         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
737         return ArrayMode::fromObserved(locker, profile, action, false);
738     }
739     
740     ArrayMode getArrayMode(ArrayProfile* profile)
741     {
742         return getArrayMode(profile, Array::Read);
743     }
744     
745     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
746     {
747         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
748         
749         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
750         
751         bool makeSafe =
752             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
753             || profile->outOfBounds(locker);
754         
755         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
756         
757         return result;
758     }
759     
760     Node* makeSafe(Node* node)
761     {
762         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
763             node->mergeFlags(NodeMayOverflowInDFG);
764         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
765             node->mergeFlags(NodeMayNegZeroInDFG);
766         
767         if (!isX86() && node->op() == ArithMod)
768             return node;
769
770         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
771             return node;
772         
773         switch (node->op()) {
774         case UInt32ToNumber:
775         case ArithAdd:
776         case ArithSub:
777         case ValueAdd:
778         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
779             node->mergeFlags(NodeMayOverflowInBaseline);
780             break;
781             
782         case ArithNegate:
783         // Currently we can't tell the difference between a negation overflowing
784         // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the slow
785         // path then we assume that it did both of those things.
786             node->mergeFlags(NodeMayOverflowInBaseline);
787             node->mergeFlags(NodeMayNegZeroInBaseline);
788             break;
789
790         case ArithMul:
791             // FIXME: We should detect cases where we only overflowed but never created
792             // negative zero.
793             // https://bugs.webkit.org/show_bug.cgi?id=132470
794             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
795                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
796                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
797             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
798                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
799                 node->mergeFlags(NodeMayNegZeroInBaseline);
800             break;
801             
802         default:
803             RELEASE_ASSERT_NOT_REACHED();
804             break;
805         }
806         
807         return node;
808     }
809     
810     Node* makeDivSafe(Node* node)
811     {
812         ASSERT(node->op() == ArithDiv);
813         
814         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
815             node->mergeFlags(NodeMayOverflowInDFG);
816         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
817             node->mergeFlags(NodeMayNegZeroInDFG);
818         
819         // The main slow case counter for op_div in the old JIT counts only when
820         // the operands are not numbers. We don't care about that since we already
821         // have speculations in place that take care of that separately. We only
822         // care about when the outcome of the division is not an integer, which
823         // is what the special fast case counter tells us.
824         
825         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
826             return node;
827         
828         // FIXME: It might be possible to make this more granular.
829         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
830         
831         return node;
832     }
833     
834     void buildOperandMapsIfNecessary();
835     
836     VM* m_vm;
837     CodeBlock* m_codeBlock;
838     CodeBlock* m_profiledBlock;
839     Graph& m_graph;
840
841     // The current block being generated.
842     BasicBlock* m_currentBlock;
843     // The bytecode index of the current instruction being generated.
844     unsigned m_currentIndex;
845
846     FrozenValue* m_constantUndefined;
847     FrozenValue* m_constantNull;
848     FrozenValue* m_constantNaN;
849     FrozenValue* m_constantOne;
850     Vector<Node*, 16> m_constants;
851
852     // The number of arguments passed to the function.
853     unsigned m_numArguments;
854     // The number of locals (vars + temporaries) used in the function.
855     unsigned m_numLocals;
856     // The number of slots (in units of sizeof(Register)) that we need to
857     // preallocate for arguments to outgoing calls from this frame. This
858     // number includes the CallFrame slots that we initialize for the callee
859     // (but not the callee-initialized CallerFrame and ReturnPC slots).
860     // This number is 0 if and only if this function is a leaf.
861     unsigned m_parameterSlots;
862     // The number of var args passed to the next var arg node.
863     unsigned m_numPassedVarArgs;
864
865     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
866     
867     Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
868     
869     struct InlineStackEntry {
870         ByteCodeParser* m_byteCodeParser;
871         
872         CodeBlock* m_codeBlock;
873         CodeBlock* m_profiledBlock;
874         InlineCallFrame* m_inlineCallFrame;
875         
876         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
877         
878         QueryableExitProfile m_exitProfile;
879         
880         // Remapping of identifier and constant numbers from the code block being
881         // inlined (inline callee) to the code block that we're inlining into
882         // (the machine code block, which is the transitive, though not necessarily
883         // direct, caller).
884         Vector<unsigned> m_identifierRemap;
885         Vector<unsigned> m_constantBufferRemap;
886         Vector<unsigned> m_switchRemap;
887         
888         // Blocks introduced by this code block, which need successor linking.
889         // May include up to one basic block containing the continuation after
890         // the callsite in the caller. These must be appended in the order that they
891         // are created, but their bytecodeBegin values need not be in order as they
892         // are ignored.
893         Vector<UnlinkedBlock> m_unlinkedBlocks;
894         
895         // Potential block linking targets. Must be sorted by bytecodeBegin, and
896         // cannot have two blocks that have the same bytecodeBegin.
897         Vector<BasicBlock*> m_blockLinkingTargets;
898         
899         // If the callsite's basic block was split into two, then this will be
900         // the head of the callsite block. It needs its successors linked to the
901         // m_unlinkedBlocks, but not the other way around: there's no way for
902         // any blocks in m_unlinkedBlocks to jump back into this block.
903         BasicBlock* m_callsiteBlockHead;
904         
905         // Does the callsite block head need linking? This is typically true
906         // but will be false for the machine code block's inline stack entry
907         // (since that one is not inlined) and for cases where an inline callee
908         // did the linking for us.
909         bool m_callsiteBlockHeadNeedsLinking;
910         
911         VirtualRegister m_returnValue;
912         
913         // Speculations about variable types collected from the profiled code block,
914         // which are based on OSR exit profiles that past DFG compilations of this
915         // code block had gathered.
916         LazyOperandValueProfileParser m_lazyOperands;
917         
918         CallLinkInfoMap m_callLinkInfos;
919         StubInfoMap m_stubInfos;
920         
921         // Did we see any returns? We need to handle the (uncommon but necessary)
922         // case where a procedure that does not return was inlined.
923         bool m_didReturn;
924         
925         // Did we have any early returns?
926         bool m_didEarlyReturn;
927         
928         // Pointers to the argument position trackers for this slice of code.
929         Vector<ArgumentPosition*> m_argumentPositions;
930         
931         InlineStackEntry* m_caller;
932         
933         InlineStackEntry(
934             ByteCodeParser*,
935             CodeBlock*,
936             CodeBlock* profiledBlock,
937             BasicBlock* callsiteBlockHead,
938             JSFunction* callee, // Null if this is a closure call.
939             VirtualRegister returnValueVR,
940             VirtualRegister inlineCallFrameStart,
941             int argumentCountIncludingThis,
942             InlineCallFrame::Kind);
943         
944         ~InlineStackEntry()
945         {
946             m_byteCodeParser->m_inlineStackTop = m_caller;
947         }
948         
949         VirtualRegister remapOperand(VirtualRegister operand) const
950         {
951             if (!m_inlineCallFrame)
952                 return operand;
953             
954             ASSERT(!operand.isConstant());
955
956             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
957         }
958     };
959     
960     InlineStackEntry* m_inlineStackTop;
961     
962     struct DelayedSetLocal {
963         VirtualRegister m_operand;
964         Node* m_value;
965         
966         DelayedSetLocal() { }
967         DelayedSetLocal(VirtualRegister operand, Node* value)
968             : m_operand(operand)
969             , m_value(value)
970         {
971         }
972         
973         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
974         {
975             if (m_operand.isArgument())
976                 return parser->setArgument(m_operand, m_value, setMode);
977             return parser->setLocal(m_operand, m_value, setMode);
978         }
979     };
980     
981     Vector<DelayedSetLocal, 2> m_setLocalQueue;
982
983     // Have we built operand maps? We initialize them lazily, and only when doing
984     // inlining.
985     bool m_haveBuiltOperandMaps;
986     // Mapping between identifier names and numbers.
987     BorrowedIdentifierMap m_identifierMap;
988     
989     CodeBlock* m_dfgCodeBlock;
990     CallLinkStatus::ContextMap m_callContextMap;
991     StubInfoMap m_dfgStubInfos;
992     
993     Instruction* m_currentInstruction;
994 };
995
996 #define NEXT_OPCODE(name) \
997     m_currentIndex += OPCODE_LENGTH(name); \
998     continue
999
1000 #define LAST_OPCODE(name) \
1001     m_currentIndex += OPCODE_LENGTH(name); \
1002     return shouldContinueParsing
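// A rough reading of the two macros above (they are used inside parseBlock(), which is declared
// earlier but not shown in this excerpt): NEXT_OPCODE advances m_currentIndex past the current
// instruction and continues the opcode dispatch loop, while LAST_OPCODE advances the index and
// returns shouldContinueParsing, ending the current basic block's parse.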
1003
1004 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1005 {
1006     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
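    // As used in the call below, the op_call/op_construct operands are: pc[1] = result register,
    // pc[2] = callee, pc[3] = argument count including 'this', and pc[4] = the register offset,
    // negated here because the handleCall() overloads expect a non-positive offset.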
1007     handleCall(
1008         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1009         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
1010 }
1011
1012 void ByteCodeParser::handleCall(
1013     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1014     int callee, int argumentCountIncludingThis, int registerOffset)
1015 {
1016     Node* callTarget = get(VirtualRegister(callee));
1017     
1018     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1019         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1020         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1021     
1022     handleCall(
1023         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1024         argumentCountIncludingThis, registerOffset, callLinkStatus);
1025 }
1026     
1027 void ByteCodeParser::handleCall(
1028     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1029     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1030     CallLinkStatus callLinkStatus)
1031 {
1032     handleCall(
1033         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1034         registerOffset, callLinkStatus, getPrediction());
1035 }
1036
1037 void ByteCodeParser::handleCall(
1038     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1039     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1040     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1041 {
1042     ASSERT(registerOffset <= 0);
1043     
1044     if (callTarget->hasConstant())
1045         callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
1046     
1047     if ((!callLinkStatus.canOptimize() || callLinkStatus.size() != 1)
1048         && !isFTL(m_graph.m_plan.mode) && Options::useFTLJIT()
1049         && InlineCallFrame::isNormalCall(kind)
1050         && CallEdgeLog::isEnabled()
1051         && Options::dfgDoesCallEdgeProfiling()) {
1052         ASSERT(op == Call || op == Construct);
1053         if (op == Call)
1054             op = ProfiledCall;
1055         else
1056             op = ProfiledConstruct;
1057     }
1058     
1059     if (!callLinkStatus.canOptimize()) {
1060         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1061         // that we cannot optimize them.
1062         
1063         addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1064         return;
1065     }
1066     
1067     unsigned nextOffset = m_currentIndex + instructionSize;
1068     
1069     OpInfo callOpInfo;
1070     
1071     if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1072         if (m_graph.compilation())
1073             m_graph.compilation()->noticeInlinedCall();
1074         return;
1075     }
1076     
1077 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1078     if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1079         CallVariant callee = callLinkStatus[0].callee();
1080         JSFunction* function = callee.function();
1081         CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1082         if (function && function->isHostFunction()) {
1083             emitFunctionChecks(callee, callTarget, registerOffset, specializationKind);
1084             callOpInfo = OpInfo(m_graph.freeze(function));
1085
1086             if (op == Call || op == ProfiledCall)
1087                 op = NativeCall;
1088             else {
1089                 ASSERT(op == Construct || op == ProfiledConstruct);
1090                 op = NativeConstruct;
1091             }
1092         }
1093     }
1094 #endif
1095     
1096     addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1097 }
1098
1099 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
1100 {
1101     Node* thisArgument;
1102     if (kind == CodeForCall)
1103         thisArgument = get(virtualRegisterForArgument(0, registerOffset));
1104     else
1105         thisArgument = 0;
1106
1107     JSCell* calleeCell;
1108     Node* callTargetForCheck;
1109     if (callee.isClosureCall()) {
1110         calleeCell = callee.executable();
1111         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1112     } else {
1113         calleeCell = callee.nonExecutableCallee();
1114         callTargetForCheck = callTarget;
1115     }
1116     
1117     ASSERT(calleeCell);
1118     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1119 }
1120
1121 void ByteCodeParser::undoFunctionChecks(CallVariant callee)
1122 {
1123     removeLastNodeFromGraph(CheckCell);
1124     if (callee.isClosureCall())
1125         removeLastNodeFromGraph(GetExecutable);
1126 }
1127
1128 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
1129 {
1130     for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
1131         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1132 }
1133
1134 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1135 {
1136     if (verbose)
1137         dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1138     
1139     FunctionExecutable* executable = callee.functionExecutable();
1140     if (!executable) {
1141         if (verbose)
1142             dataLog("    Failing because there is no function executable.\n");
1143         return UINT_MAX;
1144     }
1145     
1146     // Does the number of arguments we're passing match the arity of the target? We currently
1147     // inline only if the number of arguments passed is greater than or equal to the number
1148     // of arguments expected.
1149     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1150         if (verbose)
1151             dataLog("    Failing because of arity mismatch.\n");
1152         return UINT_MAX;
1153     }
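    // For instance (illustrative numbers only): a callee declared with two parameters has
    // parameterCount() == 2, so a call site passing argumentCountIncludingThis >= 3 (the two
    // arguments plus 'this') passes this check, while a shorter argument list is rejected.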
1154     
1155     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1156     // being an inline candidate? We might not have a code block if code was thrown away or if we
1157     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1158     // if we had a static proof of what was being called; this might happen for example if you call a
1159     // global function, where watchpointing gives us static information. Overall, it's a rare case
1160     // because we expect that any hot callees would have already been compiled.
1161     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1162     if (!codeBlock) {
1163         if (verbose)
1164             dataLog("    Failing because no code block available.\n");
1165         return UINT_MAX;
1166     }
1167     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1168         codeBlock, kind, callee.isClosureCall());
1169     if (!canInline(capabilityLevel)) {
1170         if (verbose)
1171             dataLog("    Failing because the function is not inlineable.\n");
1172         return UINT_MAX;
1173     }
1174     
1175     // Check if the caller is already too large. We do this check here because that's just
1176     // where we happen to also have the callee's code block, and we want that for the
1177     // purpose of unsetting SABI.
1178     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1179         codeBlock->m_shouldAlwaysBeInlined = false;
1180         if (verbose)
1181             dataLog("    Failing because the caller is too large.\n");
1182         return UINT_MAX;
1183     }
1184     
1185     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1186     // this function.
1187     // https://bugs.webkit.org/show_bug.cgi?id=127627
1188     
1189     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1190     // too many levels? If either of these is detected, then don't inline. We adjust our
1191     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1192     
1193     unsigned depth = 0;
1194     unsigned recursion = 0;
1195     
1196     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1197         ++depth;
1198         if (depth >= Options::maximumInliningDepth()) {
1199             if (verbose)
1200                 dataLog("    Failing because depth exceeded.\n");
1201             return UINT_MAX;
1202         }
1203         
1204         if (entry->executable() == executable) {
1205             ++recursion;
1206             if (recursion >= Options::maximumInliningRecursion()) {
1207                 if (verbose)
1208                     dataLog("    Failing because recursion detected.\n");
1209                 return UINT_MAX;
1210             }
1211         }
1212     }
1213     
1214     if (verbose)
1215         dataLog("    Inlining should be possible.\n");
1216     
1217     // It might be possible to inline.
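    // The returned cost is simply the callee's bytecode instruction count; the caller presumably
    // weighs it against its remaining inlining budget (see the inliningBalance parameter of
    // attemptToInlineCall above) before committing to the inline.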
1218     return codeBlock->instructionCount();
1219 }
1220
1221 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability)
1222 {
1223     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1224     
1225     ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1226     
1227     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1228
1229     // FIXME: Don't flush constants!
1230     
1231     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1232     
1233     ensureLocals(
1234         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1235         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1236     
1237     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1238
1239     VirtualRegister resultReg(resultOperand);
1240     if (resultReg.isValid())
1241         resultReg = m_inlineStackTop->remapOperand(resultReg);
1242     
1243     InlineStackEntry inlineStackEntry(
1244         this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1245         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1246     
1247     // This is where the actual inlining really happens.
1248     unsigned oldIndex = m_currentIndex;
1249     m_currentIndex = 0;
1250
1251     InlineVariableData inlineVariableData;
1252     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1253     inlineVariableData.argumentPositionStart = argumentPositionStart;
1254     inlineVariableData.calleeVariable = 0;
1255     
1256     RELEASE_ASSERT(
1257         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1258         == callee.isClosureCall());
1259     if (callee.isClosureCall()) {
1260         VariableAccessData* calleeVariable =
1261             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1262         
1263         calleeVariable->mergeShouldNeverUnbox(true);
1264         
1265         inlineVariableData.calleeVariable = calleeVariable;
1266     }
1267     
1268     m_graph.m_inlineVariableData.append(inlineVariableData);
1269     
1270     parseCodeBlock();
1271     clearCaches(); // Reset our state now that we're back to the outer code.
1272     
1273     m_currentIndex = oldIndex;
1274     
1275     // If the inlined code created some new basic blocks, then we have linking to do.
1276     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1277         
1278         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1279         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1280             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1281         else
1282             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1283         
1284         if (callerLinkability == CallerDoesNormalLinking)
1285             cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1286         
1287         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1288     } else
1289         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1290     
1291     BasicBlock* lastBlock = m_graph.lastBlock();
1292     // If there was a return, but no early returns, then we're done. We allow parsing of
1293     // the caller to continue in whatever basic block we're in right now.
1294     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1295         ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
1296         
1297         // If we created new blocks then the last block needs linking, but in the
1298         // caller. It doesn't need to be linked to, but it needs outgoing links.
1299         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1300             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1301             // for release builds because this block will never serve as a potential target
1302             // in the linker's binary search.
1303             lastBlock->bytecodeBegin = m_currentIndex;
1304             if (callerLinkability == CallerDoesNormalLinking) {
1305                 if (verbose)
1306                     dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1307                 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1308             }
1309         }
1310         
1311         m_currentBlock = m_graph.lastBlock();
1312         return;
1313     }
1314     
1315     // If we get to this point then all blocks must end in some sort of terminal.
1316     ASSERT(lastBlock->last()->isTerminal());
1317
1318     // Need to create a new basic block for the continuation at the caller.
1319     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1320
1321     // Link the early returns to the basic block we're about to create.
1322     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1323         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1324             continue;
1325         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1326         ASSERT(!blockToLink->isLinked);
1327         Node* node = blockToLink->last();
1328         ASSERT(node->op() == Jump);
1329         ASSERT(!node->targetBlock());
1330         node->targetBlock() = block.get();
1331         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1332         if (verbose)
1333             dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1334         blockToLink->didLink();
1335     }
1336     
1337     m_currentBlock = block.get();
1338     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1339     if (verbose)
1340         dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1341     if (callerLinkability == CallerDoesNormalLinking) {
1342         m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1343         m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1344     }
1345     m_graph.appendBlock(block);
1346     prepareToParseBlock();
1347 }
1348
1349 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1350 {
1351     // It's possible that the callsite block head is not owned by the caller.
1352     if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1353         // It's definitely owned by the caller, because the caller created new blocks.
1354         // Assert that this all adds up.
1355         ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1356         ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1357         inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1358     } else {
1359         // It's definitely not owned by the caller. Tell the caller that it does not
1360         // need to link its callsite block head, because we already did it.
1361         ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1362         ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1363         inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1364     }
1365 }
1366
1367 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance)
1368 {
1369     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1370     
1371     if (!inliningBalance)
1372         return false;
1373     
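         // Known internal functions and intrinsics can be handled without building an inline frame;
         // a successful specialization of either kind costs one unit of inlining balance.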
1374     if (InternalFunction* function = callee.internalFunction()) {
1375         if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind)) {
1376             addToGraph(Phantom, callTargetNode);
1377             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1378             inliningBalance--;
1379             return true;
1380         }
1381         return false;
1382     }
1383     
1384     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1385     if (intrinsic != NoIntrinsic) {
1386         if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1387             addToGraph(Phantom, callTargetNode);
1388             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1389             inliningBalance--;
1390             return true;
1391         }
1392         return false;
1393     }
1394     
1395     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1396     if (myInliningCost > inliningBalance)
1397         return false;
1398     
1399     inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability);
1400     inliningBalance -= myInliningCost;
1401     return true;
1402 }
1403
1404 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1405 {
1406     if (verbose) {
1407         dataLog("Handling inlining...\n");
1408         dataLog("Stack: ", currentCodeOrigin(), "\n");
1409     }
1410     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1411     
1412     if (!callLinkStatus.size()) {
1413         if (verbose)
1414             dataLog("Bailing inlining.\n");
1415         return false;
1416     }
1417     
1418     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1419     if (specializationKind == CodeForConstruct)
1420         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1421     if (callLinkStatus.isClosureCall())
1422         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
1423     
1424     // First check if we can avoid creating control flow. Our inliner does some CFG
1425     // simplification on the fly and this helps reduce compile times, but we can only leverage
1426     // this in cases where we don't need control flow diamonds to check the callee.
1427     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1428         emitFunctionChecks(
1429             callLinkStatus[0].callee(), callTargetNode, registerOffset, specializationKind);
1430         bool result = attemptToInlineCall(
1431             callTargetNode, resultOperand, callLinkStatus[0].callee(), registerOffset,
1432             argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1433             inliningBalance);
1434         if (!result && !callLinkStatus.isProved())
1435             undoFunctionChecks(callLinkStatus[0].callee());
1436         if (verbose) {
1437             dataLog("Done inlining (simple).\n");
1438             dataLog("Stack: ", currentCodeOrigin(), "\n");
1439         }
1440         return result;
1441     }
1442     
1443     // We need to create some kind of switch over the callee. For now we only do this if we believe
1444     // that we're in the top tier. We have two reasons for this: first, it gives us an opportunity to
1445     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1446     // the DFG. By polyvariant profiling we mean polyvariant profiling of *this* call. Note that we
1447     // could improve that aspect by doing polymorphic inlining while still keeping the call edge
1448     // profiling. Currently we opt against this, but it could be interesting. That would require
1449     // having a separate node for call edge profiling.
1450     // FIXME: Introduce the notion of a separate call edge profiling node.
1451     // https://bugs.webkit.org/show_bug.cgi?id=136033
1452     if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()) {
1453         if (verbose) {
1454             dataLog("Bailing inlining (hard).\n");
1455             dataLog("Stack: ", currentCodeOrigin(), "\n");
1456         }
1457         return false;
1458     }
1459     
1460     unsigned oldOffset = m_currentIndex;
1461     
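         // Work out whether every profiled callee is a closure call or every one is a direct call;
         // that decides whether we switch on the callee cell itself or on its executable.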
1462     bool allAreClosureCalls = true;
1463     bool allAreDirectCalls = true;
1464     for (unsigned i = callLinkStatus.size(); i--;) {
1465         if (callLinkStatus[i].callee().isClosureCall())
1466             allAreDirectCalls = false;
1467         else
1468             allAreClosureCalls = false;
1469     }
1470     
1471     Node* thingToSwitchOn;
1472     if (allAreDirectCalls)
1473         thingToSwitchOn = callTargetNode;
1474     else if (allAreClosureCalls)
1475         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1476     else {
1477         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1478         // where it would be beneficial. Also, CallLinkStatus would make all callees appear like
1479         // closure calls if any calls were closure calls - except for calls to internal functions.
1480         // So this will only arise if some callees are internal functions and others are closures.
1481         // https://bugs.webkit.org/show_bug.cgi?id=136020
1482         if (verbose) {
1483             dataLog("Bailing inlining (mix).\n");
1484             dataLog("Stack: ", currentCodeOrigin(), "\n");
1485         }
1486         return false;
1487     }
1488     
1489     if (verbose) {
1490         dataLog("Doing hard inlining...\n");
1491         dataLog("Stack: ", currentCodeOrigin(), "\n");
1492     }
1493     
1494     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1495     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1496     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1497     // yet.
1498     if (verbose)
1499         dataLog("Register offset: ", registerOffset, "\n");
1500     VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1501     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1502     if (verbose)
1503         dataLog("Callee is going to be ", calleeReg, "\n");
1504     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1505     
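         // Switch on the callee (or its executable) so that each block created below handles one of
         // the profiled callees; the fall-through case becomes the slow path.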
1506     SwitchData& data = *m_graph.m_switchData.add();
1507     data.kind = SwitchCell;
1508     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
1509     
1510     BasicBlock* originBlock = m_currentBlock;
1511     if (verbose)
1512         dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1513     originBlock->didLink();
1514     cancelLinkingForBlock(m_inlineStackTop, originBlock);
1515     
1516     // Each inlined callee will have a landing block that it returns at. They should all have jumps
1517     // to the continuation block, which we create last.
1518     Vector<BasicBlock*> landingBlocks;
1519     
1520     // We force this to true if we give up on inlining any of the edges.
1521     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1522     
1523     if (verbose)
1524         dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1525     
1526     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1527         m_currentIndex = oldOffset;
1528         RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1529         m_currentBlock = block.get();
1530         m_graph.appendBlock(block);
1531         prepareToParseBlock();
1532         
1533         Node* myCallTargetNode = getDirect(calleeReg);
1534         
1535         bool inliningResult = attemptToInlineCall(
1536             myCallTargetNode, resultOperand, callLinkStatus[i].callee(), registerOffset,
1537             argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1538             inliningBalance);
1539         
1540         if (!inliningResult) {
1541             // That failed so we let the block die. Nothing interesting should have been added to
1542             // the block. We also give up on inlining any of the (less frequent) callees.
1543             ASSERT(m_currentBlock == block.get());
1544             ASSERT(m_graph.m_blocks.last() == block);
1545             m_graph.killBlockAndItsContents(block.get());
1546             m_graph.m_blocks.removeLast();
1547             
1548             // The fact that inlining failed means we need a slow path.
1549             couldTakeSlowPath = true;
1550             break;
1551         }
1552         
1553         JSCell* thingToCaseOn;
1554         if (allAreDirectCalls)
1555             thingToCaseOn = callLinkStatus[i].callee().nonExecutableCallee();
1556         else {
1557             ASSERT(allAreClosureCalls);
1558             thingToCaseOn = callLinkStatus[i].callee().executable();
1559         }
1560         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1561         m_currentIndex = nextOffset;
1562         processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1563         addToGraph(Jump);
1564         if (verbose)
1565             dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1566         m_currentBlock->didLink();
1567         landingBlocks.append(m_currentBlock);
1568
1569         if (verbose)
1570             dataLog("Finished inlining ", callLinkStatus[i].callee(), " at ", currentCodeOrigin(), ".\n");
1571     }
1572     
1573     RefPtr<BasicBlock> slowPathBlock = adoptRef(
1574         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1575     m_currentIndex = oldOffset;
1576     data.fallThrough = BranchTarget(slowPathBlock.get());
1577     m_graph.appendBlock(slowPathBlock);
1578     if (verbose)
1579         dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1580     slowPathBlock->didLink();
1581     prepareToParseBlock();
1582     m_currentBlock = slowPathBlock.get();
1583     Node* myCallTargetNode = getDirect(calleeReg);
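         // If the call could take the slow path (or we gave up inlining one of the callees), the
         // fall-through block makes a generic call. Otherwise the switch covers every observed callee,
         // so the fall-through only needs to keep the callee and arguments alive and produce a bottom
         // value.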
1584     if (couldTakeSlowPath) {
1585         addCall(
1586             resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1587             registerOffset, prediction);
1588     } else {
1589         addToGraph(CheckBadCell);
1590         addToGraph(Phantom, myCallTargetNode);
1591         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1592         
1593         set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1594     }
1595
1596     m_currentIndex = nextOffset;
1597     processSetLocalQueue();
1598     addToGraph(Jump);
1599     landingBlocks.append(m_currentBlock);
1600     
1601     RefPtr<BasicBlock> continuationBlock = adoptRef(
1602         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1603     m_graph.appendBlock(continuationBlock);
1604     if (verbose)
1605         dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1606     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1607     prepareToParseBlock();
1608     m_currentBlock = continuationBlock.get();
1609     
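         // Now that the continuation block exists, point each landing block's terminal Jump at it.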
1610     for (unsigned i = landingBlocks.size(); i--;)
1611         landingBlocks[i]->last()->targetBlock() = continuationBlock.get();
1612     
1613     m_currentIndex = oldOffset;
1614     
1615     if (verbose) {
1616         dataLog("Done inlining (hard).\n");
1617         dataLog("Stack: ", currentCodeOrigin(), "\n");
1618     }
1619     return true;
1620 }
1621
1622 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1623 {
1624     if (argumentCountIncludingThis == 1) { // Math.min() / Math.max() with no arguments.
1625         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1626         return true;
1627     }
1628      
1629     if (argumentCountIncludingThis == 2) { // Math.min(x) / Math.max(x)
1630         Node* result = get(virtualRegisterForArgument(1, registerOffset));
1631         addToGraph(Phantom, Edge(result, NumberUse));
1632         set(VirtualRegister(resultOperand), result);
1633         return true;
1634     }
1635     
1636     if (argumentCountIncludingThis == 3) { // Math.min(x, y) / Math.max(x, y)
1637         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1638         return true;
1639     }
1640     
1641     // Don't handle >=3 arguments for now.
1642     return false;
1643 }
1644
1645 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1646 {
1647     switch (intrinsic) {
1648     case AbsIntrinsic: {
1649         if (argumentCountIncludingThis == 1) { // Math.abs()
1650             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1651             return true;
1652         }
1653
1654         if (!MacroAssembler::supportsFloatingPointAbs())
1655             return false;
1656
1657         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
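             // If this site has previously exited due to overflow, record that the ArithAbs may
             // overflow in the DFG.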
1658         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1659             node->mergeFlags(NodeMayOverflowInDFG);
1660         set(VirtualRegister(resultOperand), node);
1661         return true;
1662     }
1663
1664     case MinIntrinsic:
1665         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1666         
1667     case MaxIntrinsic:
1668         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1669         
1670     case SqrtIntrinsic:
1671     case CosIntrinsic:
1672     case SinIntrinsic: {
1673         if (argumentCountIncludingThis == 1) {
1674             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1675             return true;
1676         }
1677         
1678         switch (intrinsic) {
1679         case SqrtIntrinsic:
1680             if (!MacroAssembler::supportsFloatingPointSqrt())
1681                 return false;
1682             
1683             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1684             return true;
1685             
1686         case CosIntrinsic:
1687             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1688             return true;
1689             
1690         case SinIntrinsic:
1691             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1692             return true;
1693             
1694         default:
1695             RELEASE_ASSERT_NOT_REACHED();
1696             return false;
1697         }
1698     }
1699         
1700     case ArrayPushIntrinsic: {
1701         if (argumentCountIncludingThis != 2)
1702             return false;
1703         
1704         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1705         if (!arrayMode.isJSArray())
1706             return false;
1707         switch (arrayMode.type()) {
1708         case Array::Undecided:
1709         case Array::Int32:
1710         case Array::Double:
1711         case Array::Contiguous:
1712         case Array::ArrayStorage: {
1713             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1714             set(VirtualRegister(resultOperand), arrayPush);
1715             
1716             return true;
1717         }
1718             
1719         default:
1720             return false;
1721         }
1722     }
1723         
1724     case ArrayPopIntrinsic: {
1725         if (argumentCountIncludingThis != 1)
1726             return false;
1727         
1728         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1729         if (!arrayMode.isJSArray())
1730             return false;
1731         switch (arrayMode.type()) {
1732         case Array::Int32:
1733         case Array::Double:
1734         case Array::Contiguous:
1735         case Array::ArrayStorage: {
1736             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1737             set(VirtualRegister(resultOperand), arrayPop);
1738             return true;
1739         }
1740             
1741         default:
1742             return false;
1743         }
1744     }
1745
1746     case CharCodeAtIntrinsic: {
1747         if (argumentCountIncludingThis != 2)
1748             return false;
1749
1750         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1751         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1752         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1753
1754         set(VirtualRegister(resultOperand), charCode);
1755         return true;
1756     }
1757
1758     case CharAtIntrinsic: {
1759         if (argumentCountIncludingThis != 2)
1760             return false;
1761
1762         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1763         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1764         Node* charAt = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1765
1766         set(VirtualRegister(resultOperand), charAt);
1767         return true;
1768     }
1769     case FromCharCodeIntrinsic: {
1770         if (argumentCountIncludingThis != 2)
1771             return false;
1772
1773         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1774         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
1775
1776         set(VirtualRegister(resultOperand), charCode);
1777
1778         return true;
1779     }
1780
1781     case RegExpExecIntrinsic: {
1782         if (argumentCountIncludingThis != 2)
1783             return false;
1784         
1785         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1786         set(VirtualRegister(resultOperand), regExpExec);
1787         
1788         return true;
1789     }
1790         
1791     case RegExpTestIntrinsic: {
1792         if (argumentCountIncludingThis != 2)
1793             return false;
1794         
1795         Node* regExpTest = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1796         set(VirtualRegister(resultOperand), regExpTest);
1797         
1798         return true;
1799     }
1800
1801     case IMulIntrinsic: {
1802         if (argumentCountIncludingThis != 3)
1803             return false;
1804         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
1805         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
1806         Node* left = get(leftOperand);
1807         Node* right = get(rightOperand);
1808         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
1809         return true;
1810     }
1811         
1812     case FRoundIntrinsic: {
1813         if (argumentCountIncludingThis != 2)
1814             return false;
1815         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1816         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
1817         return true;
1818     }
1819         
1820     case DFGTrueIntrinsic: {
1821         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
1822         return true;
1823     }
1824         
1825     case OSRExitIntrinsic: {
1826         addToGraph(ForceOSRExit);
1827         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1828         return true;
1829     }
1830         
1831     case IsFinalTierIntrinsic: {
1832         set(VirtualRegister(resultOperand),
1833             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
1834         return true;
1835     }
1836         
1837     case SetInt32HeapPredictionIntrinsic: {
1838         for (int i = 1; i < argumentCountIncludingThis; ++i) {
1839             Node* node = get(virtualRegisterForArgument(i, registerOffset));
1840             if (node->hasHeapPrediction())
1841                 node->setHeapPrediction(SpecInt32);
1842         }
1843         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1844         return true;
1845     }
1846         
1847     case FiatInt52Intrinsic: {
1848         if (argumentCountIncludingThis != 2)
1849             return false;
1850         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1851         if (enableInt52())
1852             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
1853         else
1854             set(VirtualRegister(resultOperand), get(operand));
1855         return true;
1856     }
1857         
1858     default:
1859         return false;
1860     }
1861 }
1862
1863 bool ByteCodeParser::handleTypedArrayConstructor(
1864     int resultOperand, InternalFunction* function, int registerOffset,
1865     int argumentCountIncludingThis, TypedArrayType type)
1866 {
1867     if (!isTypedView(type))
1868         return false;
1869     
1870     if (function->classInfo() != constructorClassInfoForType(type))
1871         return false;
1872     
1873     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1874         return false;
1875     
1876     // We only have an intrinsic for the case where you say:
1877     //
1878     // new FooArray(blah);
1879     //
1880     // Of course, 'blah' could be any of the following:
1881     //
1882     // - Integer, indicating that you want to allocate an array of that length.
1883     //   This is the thing we're hoping for, and what we can actually do meaningful
1884     //   optimizations for.
1885     //
1886     // - Array buffer, indicating that you want to create a view onto that _entire_
1887     //   buffer.
1888     //
1889     // - Non-buffer object, indicating that you want to create a copy of that
1890     //   object by pretending that it quacks like an array.
1891     //
1892     // - Anything else, indicating that you want to have an exception thrown at
1893     //   you.
1894     //
1895     // The intrinsic, NewTypedArray, will behave as if it could do any of these
1896     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
1897     // predicted Int32, then we lock it in as a normal typed array allocation.
1898     // Otherwise, NewTypedArray turns into a totally opaque function call that
1899     // may clobber the world - by virtue of it accessing properties on what could
1900     // be an object.
1901     //
1902     // Note that although the generic form of NewTypedArray sounds sort of awful,
1903     // it is actually quite likely to be more efficient than a fully generic
1904     // Construct. So, we might want to think about making NewTypedArray variadic,
1905     // or else making Construct not super slow.
1906     
1907     if (argumentCountIncludingThis != 2)
1908         return false;
1909     
1910     set(VirtualRegister(resultOperand),
1911         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
1912     return true;
1913 }
1914
1915 bool ByteCodeParser::handleConstantInternalFunction(
1916     int resultOperand, InternalFunction* function, int registerOffset,
1917     int argumentCountIncludingThis, CodeSpecializationKind kind)
1918 {
1919     // If we ever find that we have a lot of internal functions that we specialize for,
1920     // then we should probably have some sort of hashtable dispatch, or maybe even
1921     // dispatch straight through the MethodTable of the InternalFunction. But for now,
1922     // it seems that this case is hit infrequently enough, and the number of functions
1923     // we know about is small enough, that having just a linear cascade of if statements
1924     // is good enough.
1925     
1926     if (function->classInfo() == ArrayConstructor::info()) {
1927         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1928             return false;
1929         
1930         if (argumentCountIncludingThis == 2) {
1931             set(VirtualRegister(resultOperand),
1932                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
1933             return true;
1934         }
1935         
1936         for (int i = 1; i < argumentCountIncludingThis; ++i)
1937             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
1938         set(VirtualRegister(resultOperand),
1939             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
1940         return true;
1941     }
1942     
1943     if (function->classInfo() == StringConstructor::info()) {
1944         Node* result;
1945         
1946         if (argumentCountIncludingThis <= 1)
1947             result = jsConstant(m_vm->smallStrings.emptyString());
1948         else
1949             result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
1950         
1951         if (kind == CodeForConstruct)
1952             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
1953         
1954         set(VirtualRegister(resultOperand), result);
1955         return true;
1956     }
1957     
1958     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
1959         bool result = handleTypedArrayConstructor(
1960             resultOperand, function, registerOffset, argumentCountIncludingThis,
1961             indexToTypedArrayType(typeIndex));
1962         if (result)
1963             return true;
1964     }
1965     
1966     return false;
1967 }
1968
1969 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
1970 {
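         // If the base is a constant and the graph can prove that the property load yields a constant
         // for these structures, fold it to a weak constant, keeping the base alive with a Phantom.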
1971     if (base->hasConstant()) {
1972         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
1973             addToGraph(Phantom, base);
1974             return weakJSConstant(constant);
1975         }
1976     }
1977     
1978     Node* propertyStorage;
1979     if (isInlineOffset(offset))
1980         propertyStorage = base;
1981     else
1982         propertyStorage = addToGraph(GetButterfly, base);
1983     
1984     StorageAccessData* data = m_graph.m_storageAccessData.add();
1985     data->offset = offset;
1986     data->identifierNumber = identifierNumber;
1987     
1988     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
1989
1990     return getByOffset;
1991 }
1992
1993 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
1994 {
1995     Node* propertyStorage;
1996     if (isInlineOffset(offset))
1997         propertyStorage = base;
1998     else
1999         propertyStorage = addToGraph(GetButterfly, base);
2000     
2001     StorageAccessData* data = m_graph.m_storageAccessData.add();
2002     data->offset = offset;
2003     data->identifierNumber = identifier;
2004     
2005     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
2006     
2007     return result;
2008 }
2009
2010 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2011 {
2012     for (unsigned i = 0; i < vector.size(); ++i)
2013         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2014 }
2015
2016 void ByteCodeParser::handleGetById(
2017     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2018     const GetByIdStatus& getByIdStatus)
2019 {
2020     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2021     
2022     if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2023         set(VirtualRegister(destinationOperand),
2024             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2025         return;
2026     }
2027     
2028     if (getByIdStatus.numVariants() > 1) {
2029         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2030             || !Options::enablePolymorphicAccessInlining()) {
2031             set(VirtualRegister(destinationOperand),
2032                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2033             return;
2034         }
2035         
2036         if (m_graph.compilation())
2037             m_graph.compilation()->noticeInlinedGetById();
2038     
2039         // 1) Emit prototype structure checks for all chains. This may not be optimal if there is
2040         //    some rarely executed case in the chain that requires a lot of checks and those
2041         //    checks are not watchpointable.
2042         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2043             emitChecks(getByIdStatus[variantIndex].constantChecks());
2044         
2045         // 2) Emit a MultiGetByOffset
2046         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2047         data->variants = getByIdStatus.variants();
2048         data->identifierNumber = identifierNumber;
2049         set(VirtualRegister(destinationOperand),
2050             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2051         return;
2052     }
2053     
2054     ASSERT(getByIdStatus.numVariants() == 1);
2055     GetByIdVariant variant = getByIdStatus[0];
2056                 
2057     if (m_graph.compilation())
2058         m_graph.compilation()->noticeInlinedGetById();
2059     
2060     Node* originalBase = base;
2061                 
2062     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2063     
2064     emitChecks(variant.constantChecks());
2065
2066     if (variant.alternateBase())
2067         base = weakJSConstant(variant.alternateBase());
2068     
2069     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2070     // ensure that the base of the original get_by_id is kept alive until we're done with
2071     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2072     // on something other than the base following the CheckStructure on base.
2073     if (originalBase != base)
2074         addToGraph(Phantom, originalBase);
2075     
2076     Node* loadedValue = handleGetByOffset(
2077         variant.callLinkStatus() ? SpecCellOther : prediction,
2078         base, variant.baseStructure(), identifierNumber, variant.offset(),
2079         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2080     
2081     if (!variant.callLinkStatus()) {
2082         set(VirtualRegister(destinationOperand), loadedValue);
2083         return;
2084     }
2085     
2086     Node* getter = addToGraph(GetGetter, loadedValue);
2087     
2088     // Make a call. We don't try to get fancy with using the smallest operand number because
2089     // the stack layout phase should compress the stack anyway.
2090     
2091     unsigned numberOfParameters = 0;
2092     numberOfParameters++; // The 'this' argument.
2093     numberOfParameters++; // True return PC.
2094     
2095     // Start with a register offset that corresponds to the last in-use register.
2096     int registerOffset = virtualRegisterForLocal(
2097         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2098     registerOffset -= numberOfParameters;
2099     registerOffset -= JSStack::CallFrameHeaderSize;
2100     
2101     // Get the alignment right.
2102     registerOffset = -WTF::roundUpToMultipleOf(
2103         stackAlignmentRegisters(),
2104         -registerOffset);
2105     
2106     ensureLocals(
2107         m_inlineStackTop->remapOperand(
2108             VirtualRegister(registerOffset)).toLocal());
2109     
2110     // Issue SetLocals. This has two effects:
2111     // 1) That's how handleCall() sees the arguments.
2112     // 2) If we inline then this ensures that the arguments are flushed so that if you use
2113     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
2114     //    since we only really care about 'this' in this case. But we're not going to take that
2115     //    shortcut.
2116     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2117     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2118     
2119     handleCall(
2120         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2121         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
2122 }
2123
2124 void ByteCodeParser::emitPutById(
2125     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2126 {
2127     if (isDirect)
2128         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2129     else
2130         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2131 }
2132
2133 void ByteCodeParser::handlePutById(
2134     Node* base, unsigned identifierNumber, Node* value,
2135     const PutByIdStatus& putByIdStatus, bool isDirect)
2136 {
2137     if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2138         if (!putByIdStatus.isSet())
2139             addToGraph(ForceOSRExit);
2140         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2141         return;
2142     }
2143     
2144     if (putByIdStatus.numVariants() > 1) {
2145         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2146             || !Options::enablePolymorphicAccessInlining()) {
2147             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2148             return;
2149         }
2150         
2151         if (m_graph.compilation())
2152             m_graph.compilation()->noticeInlinedPutById();
2153         
2154         if (!isDirect) {
2155             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2156                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2157                     continue;
2158                 emitChecks(putByIdStatus[variantIndex].constantChecks());
2159             }
2160         }
2161         
2162         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2163         data->variants = putByIdStatus.variants();
2164         data->identifierNumber = identifierNumber;
2165         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2166         return;
2167     }
2168     
2169     ASSERT(putByIdStatus.numVariants() == 1);
2170     const PutByIdVariant& variant = putByIdStatus[0];
2171     
2172     switch (variant.kind()) {
2173     case PutByIdVariant::Replace: {
2174         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2175         handlePutByOffset(base, identifierNumber, variant.offset(), value);
2176         if (m_graph.compilation())
2177             m_graph.compilation()->noticeInlinedPutById();
2178         return;
2179     }
2180     
2181     case PutByIdVariant::Transition: {
2182         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2183         emitChecks(variant.constantChecks());
2184
2185         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2186     
2187         Node* propertyStorage;
2188         Transition* transition = m_graph.m_transitions.add(
2189             variant.oldStructureForTransition(), variant.newStructure());
2190
2191         if (variant.reallocatesStorage()) {
2192
2193             // If we're growing the property storage then it must be because we're
2194             // storing into the out-of-line storage.
2195             ASSERT(!isInlineOffset(variant.offset()));
2196
2197             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2198                 propertyStorage = addToGraph(
2199                     AllocatePropertyStorage, OpInfo(transition), base);
2200             } else {
2201                 propertyStorage = addToGraph(
2202                     ReallocatePropertyStorage, OpInfo(transition),
2203                     base, addToGraph(GetButterfly, base));
2204             }
2205         } else {
2206             if (isInlineOffset(variant.offset()))
2207                 propertyStorage = base;
2208             else
2209                 propertyStorage = addToGraph(GetButterfly, base);
2210         }
2211
2212         addToGraph(PutStructure, OpInfo(transition), base);
2213
2214         StorageAccessData* data = m_graph.m_storageAccessData.add();
2215         data->offset = variant.offset();
2216         data->identifierNumber = identifierNumber;
2217         
2218         addToGraph(
2219             PutByOffset,
2220             OpInfo(data),
2221             propertyStorage,
2222             base,
2223             value);
2224
2225         if (m_graph.compilation())
2226             m_graph.compilation()->noticeInlinedPutById();
2227         return;
2228     }
2229         
2230     case PutByIdVariant::Setter: {
2231         Node* originalBase = base;
2232         
2233         addToGraph(
2234             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2235         
2236         emitChecks(variant.constantChecks());
2237         
2238         if (variant.alternateBase())
2239             base = weakJSConstant(variant.alternateBase());
2240         
2241         Node* loadedValue = handleGetByOffset(
2242             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2243             GetGetterSetterByOffset);
2244         
2245         Node* setter = addToGraph(GetSetter, loadedValue);
2246         
2247         // Make a call. We don't try to get fancy with using the smallest operand number because
2248         // the stack layout phase should compress the stack anyway.
2249     
2250         unsigned numberOfParameters = 0;
2251         numberOfParameters++; // The 'this' argument.
2252         numberOfParameters++; // The new value.
2253         numberOfParameters++; // True return PC.
2254     
2255         // Start with a register offset that corresponds to the last in-use register.
2256         int registerOffset = virtualRegisterForLocal(
2257             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2258         registerOffset -= numberOfParameters;
2259         registerOffset -= JSStack::CallFrameHeaderSize;
2260     
2261         // Get the alignment right.
2262         registerOffset = -WTF::roundUpToMultipleOf(
2263             stackAlignmentRegisters(),
2264             -registerOffset);
2265     
2266         ensureLocals(
2267             m_inlineStackTop->remapOperand(
2268                 VirtualRegister(registerOffset)).toLocal());
2269     
2270         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2271         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2272         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2273     
2274         handleCall(
2275             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2276             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2277             *variant.callLinkStatus(), SpecOther);
2278         return;
2279     }
2280     
2281     default: {
2282         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2283         return;
2284     } }
2285 }
2286
2287 void ByteCodeParser::prepareToParseBlock()
2288 {
2289     clearCaches();
2290     ASSERT(m_setLocalQueue.isEmpty());
2291 }
2292
2293 void ByteCodeParser::clearCaches()
2294 {
2295     m_constants.resize(0);
2296 }
2297
2298 Node* ByteCodeParser::getScope(VirtualRegister scopeChain, unsigned skipCount)
2299 {
2300     Node* localBase = get(scopeChain);
2301     for (unsigned n = skipCount; n--;)
2302         localBase = addToGraph(SkipScope, localBase);
2303     return localBase;
2304 }
2305
2306 bool ByteCodeParser::parseBlock(unsigned limit)
2307 {
2308     bool shouldContinueParsing = true;
2309
2310     Interpreter* interpreter = m_vm->interpreter;
2311     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2312     unsigned blockBegin = m_currentIndex;
2313     
2314     // If we are the first basic block, introduce markers for arguments. This allows
2315     // us to track if a use of an argument may use the actual argument passed, as
2316     // opposed to using a value we set explicitly.
2317     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2318         m_graph.m_arguments.resize(m_numArguments);
2319         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2320             VariableAccessData* variable = newVariableAccessData(
2321                 virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
2322             variable->mergeStructureCheckHoistingFailed(
2323                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2324             variable->mergeCheckArrayHoistingFailed(
2325                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2326             
2327             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2328             m_graph.m_arguments[argument] = setArgument;
2329             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2330         }
2331     }
2332
2333     while (true) {
2334         processSetLocalQueue();
2335         
2336         // Don't extend over jump destinations.
2337         if (m_currentIndex == limit) {
2338             // Ordinarily we want to plant a jump. But refuse to do this if the block is
2339             // empty. This is a special case for inlining, which might otherwise create
2340             // some empty blocks. When parseBlock() returns with an empty block, the block
2341             // will get repurposed instead of creating a new one. Note that this logic
2342             // relies on every bytecode resulting in one or more nodes, which would be
2343             // true anyway except for op_loop_hint, which emits a Phantom to force this
2344             // to be true.
2345             if (!m_currentBlock->isEmpty())
2346                 addToGraph(Jump, OpInfo(m_currentIndex));
2347             return shouldContinueParsing;
2348         }
2349         
2350         // Switch on the current bytecode opcode.
2351         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2352         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2353         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2354         
2355         if (Options::verboseDFGByteCodeParsing())
2356             dataLog("    parsing ", currentCodeOrigin(), "\n");
2357         
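             // When compiling with profiling enabled, count how many times each bytecode origin
             // executes.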
2358         if (m_graph.compilation()) {
2359             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2360                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2361         }
2362         
2363         switch (opcodeID) {
2364
2365         // === Function entry opcodes ===
2366
2367         case op_enter: {
2368             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2369             // Initialize all locals to undefined.
2370             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2371                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2372             if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
2373                 set(virtualRegisterForArgument(0), undefined, ImmediateNakedSet);
2374             NEXT_OPCODE(op_enter);
2375         }
2376             
2377         case op_touch_entry:
2378             if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
2379                 addToGraph(ForceOSRExit);
2380             NEXT_OPCODE(op_touch_entry);
2381             
2382         case op_to_this: {
2383             Node* op1 = getThis();
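                 // We can replace to_this with a cheap structure check when the profiled structure is
                 // usable, its class does not override toThis, and we have seen neither slow cases nor
                 // bad-cache exits here; otherwise emit a full ToThis.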
2384             if (op1->op() != ToThis) {
2385                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2386                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2387                     || !cachedStructure
2388                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2389                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2390                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2391                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2392                     setThis(addToGraph(ToThis, op1));
2393                 } else {
2394                     addToGraph(
2395                         CheckStructure,
2396                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2397                         op1);
2398                 }
2399             }
2400             NEXT_OPCODE(op_to_this);
2401         }
2402
2403         case op_create_this: {
2404             int calleeOperand = currentInstruction[2].u.operand;
2405             Node* callee = get(VirtualRegister(calleeOperand));
2406             bool alreadyEmitted = false;
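                 // If the callee is a compile-time-known JSFunction with a cached allocation structure,
                 // watch its allocation profile and allocate the new object directly.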
2407             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2408                 if (Structure* structure = function->allocationStructure()) {
2409                     addToGraph(AllocationProfileWatchpoint, OpInfo(m_graph.freeze(function)));
2410                     // The callee is still live up to this point.
2411                     addToGraph(Phantom, callee);
2412                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2413                     alreadyEmitted = true;
2414                 }
2415             }
2416             if (!alreadyEmitted) {
2417                 set(VirtualRegister(currentInstruction[1].u.operand),
2418                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2419             }
2420             NEXT_OPCODE(op_create_this);
2421         }
2422
2423         case op_new_object: {
2424             set(VirtualRegister(currentInstruction[1].u.operand),
2425                 addToGraph(NewObject,
2426                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2427             NEXT_OPCODE(op_new_object);
2428         }
2429             
2430         case op_new_array: {
2431             int startOperand = currentInstruction[2].u.operand;
2432             int numOperands = currentInstruction[3].u.operand;
2433             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2434             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2435                 addVarArgChild(get(VirtualRegister(operandIdx)));
2436             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2437             NEXT_OPCODE(op_new_array);
2438         }
2439             
2440         case op_new_array_with_size: {
2441             int lengthOperand = currentInstruction[2].u.operand;
2442             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2443             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2444             NEXT_OPCODE(op_new_array_with_size);
2445         }
2446             
2447         case op_new_array_buffer: {
2448             int startConstant = currentInstruction[2].u.operand;
2449             int numConstants = currentInstruction[3].u.operand;
2450             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2451             NewArrayBufferData data;
2452             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2453             data.numConstants = numConstants;
2454             data.indexingType = profile->selectIndexingType();
2455
2456             // If this statement has never executed, we'll have the wrong indexing type in the profile.
2457             for (int i = 0; i < numConstants; ++i) {
2458                 data.indexingType =
2459                     leastUpperBoundOfIndexingTypeAndValue(
2460                         data.indexingType,
2461                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2462             }
2463             
2464             m_graph.m_newArrayBufferData.append(data);
2465             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2466             NEXT_OPCODE(op_new_array_buffer);
2467         }
2468             
2469         case op_new_regexp: {
2470             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2471             NEXT_OPCODE(op_new_regexp);
2472         }
2473             
2474         case op_get_callee: {
2475             JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
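                 // If baseline profiling always saw the same callee cell and we have no contrary exit
                 // profile, plant a CheckCell and constant-fold op_get_callee; otherwise just read the
                 // callee from the call frame.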
2476             if (!cachedFunction 
2477                 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2478                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
2479                 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
2480             } else {
2481                 FrozenValue* frozen = m_graph.freeze(cachedFunction);
2482                 ASSERT(cachedFunction->inherits(JSFunction::info()));
2483                 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
2484                 addToGraph(CheckCell, OpInfo(frozen), actualCallee);
2485                 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2486             }
2487             NEXT_OPCODE(op_get_callee);
2488         }
2489
2490         // === Bitwise operations ===
2491
2492         case op_bitand: {
2493             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2494             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2495             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2496             NEXT_OPCODE(op_bitand);
2497         }
2498
2499         case op_bitor: {
2500             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2501             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2502             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2503             NEXT_OPCODE(op_bitor);
2504         }
2505
2506         case op_bitxor: {
2507             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2508             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2509             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2510             NEXT_OPCODE(op_bitxor);
2511         }
2512
2513         case op_rshift: {
2514             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2515             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2516             set(VirtualRegister(currentInstruction[1].u.operand),
2517                 addToGraph(BitRShift, op1, op2));
2518             NEXT_OPCODE(op_rshift);
2519         }
2520
2521         case op_lshift: {
2522             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2523             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2524             set(VirtualRegister(currentInstruction[1].u.operand),
2525                 addToGraph(BitLShift, op1, op2));
2526             NEXT_OPCODE(op_lshift);
2527         }
2528
2529         case op_urshift: {
2530             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2531             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2532             set(VirtualRegister(currentInstruction[1].u.operand),
2533                 addToGraph(BitURShift, op1, op2));
2534             NEXT_OPCODE(op_urshift);
2535         }
2536             
2537         case op_unsigned: {
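            // op_unsigned converts the int32 produced by an unsigned shift into its unsigned
            // numeric value; makeSafe() consults profiling, so the node may produce a double
            // when the value has been seen to overflow int32.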
2538             set(VirtualRegister(currentInstruction[1].u.operand),
2539                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2540             NEXT_OPCODE(op_unsigned);
2541         }
2542
2543         // === Increment/Decrement opcodes ===
2544
2545         case op_inc: {
2546             int srcDst = currentInstruction[1].u.operand;
2547             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2548             Node* op = get(srcDstVirtualRegister);
2549             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2550             NEXT_OPCODE(op_inc);
2551         }
2552
2553         case op_dec: {
2554             int srcDst = currentInstruction[1].u.operand;
2555             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2556             Node* op = get(srcDstVirtualRegister);
2557             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2558             NEXT_OPCODE(op_dec);
2559         }
2560
2561         // === Arithmetic operations ===
2562
2563         case op_add: {
2564             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2565             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
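            // '+' is pure arithmetic only when both operands are known to yield numbers;
            // otherwise ValueAdd is used, which also covers string concatenation and the
            // other ToPrimitive cases.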
2566             if (op1->hasNumberResult() && op2->hasNumberResult())
2567                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2568             else
2569                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2570             NEXT_OPCODE(op_add);
2571         }
2572
2573         case op_sub: {
2574             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2575             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2576             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2577             NEXT_OPCODE(op_sub);
2578         }
2579
2580         case op_negate: {
2581             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2582             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2583             NEXT_OPCODE(op_negate);
2584         }
2585
2586         case op_mul: {
2587             // Multiply requires that the inputs are not truncated, unfortunately.
2588             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2589             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2590             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2591             NEXT_OPCODE(op_mul);
2592         }
2593
2594         case op_mod: {
2595             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2596             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2597             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2598             NEXT_OPCODE(op_mod);
2599         }
2600
2601         case op_div: {
2602             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2603             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2604             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2605             NEXT_OPCODE(op_div);
2606         }
2607
2608         // === Misc operations ===
2609
2610         case op_debug:
2611             addToGraph(Breakpoint);
2612             NEXT_OPCODE(op_debug);
2613
2614         case op_profile_will_call: {
2615             addToGraph(ProfileWillCall);
2616             NEXT_OPCODE(op_profile_will_call);
2617         }
2618
2619         case op_profile_did_call: {
2620             addToGraph(ProfileDidCall);
2621             NEXT_OPCODE(op_profile_did_call);
2622         }
2623
2624         case op_mov: {
2625             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2626             set(VirtualRegister(currentInstruction[1].u.operand), op);
2627             NEXT_OPCODE(op_mov);
2628         }
2629
2630         case op_check_has_instance:
2631             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2632             NEXT_OPCODE(op_check_has_instance);
2633
2634         case op_instanceof: {
2635             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2636             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2637             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2638             NEXT_OPCODE(op_instanceof);
2639         }
2640             
2641         case op_is_undefined: {
2642             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2643             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2644             NEXT_OPCODE(op_is_undefined);
2645         }
2646
2647         case op_is_boolean: {
2648             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2649             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2650             NEXT_OPCODE(op_is_boolean);
2651         }
2652
2653         case op_is_number: {
2654             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2655             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2656             NEXT_OPCODE(op_is_number);
2657         }
2658
2659         case op_is_string: {
2660             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2661             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2662             NEXT_OPCODE(op_is_string);
2663         }
2664
2665         case op_is_object: {
2666             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2667             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2668             NEXT_OPCODE(op_is_object);
2669         }
2670
2671         case op_is_function: {
2672             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2673             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2674             NEXT_OPCODE(op_is_function);
2675         }
2676
2677         case op_not: {
2678             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2679             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2680             NEXT_OPCODE(op_not);
2681         }
2682             
2683         case op_to_primitive: {
2684             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2685             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2686             NEXT_OPCODE(op_to_primitive);
2687         }
2688             
2689         case op_strcat: {
2690             int startOperand = currentInstruction[2].u.operand;
2691             int numOperands = currentInstruction[3].u.operand;
2692 #if CPU(X86)
2693             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2694             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2695             const unsigned maxRopeArguments = 2;
2696 #else
2697             const unsigned maxRopeArguments = 3;
2698 #endif
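            // Concatenate by chaining MakeRope nodes: operands accumulate into a small
            // buffer, and whenever it reaches maxRopeArguments the partial rope is folded
            // into slot 0 and accumulation continues from there.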
2699             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2700             for (int i = 0; i < numOperands; i++)
2701                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2702
2703             for (int i = 0; i < numOperands; i++)
2704                 addToGraph(Phantom, toStringNodes[i]);
2705
2706             Node* operands[AdjacencyList::Size];
2707             unsigned indexInOperands = 0;
2708             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2709                 operands[i] = 0;
2710             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2711                 if (indexInOperands == maxRopeArguments) {
2712                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2713                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2714                         operands[i] = 0;
2715                     indexInOperands = 1;
2716                 }
2717                 
2718                 ASSERT(indexInOperands < AdjacencyList::Size);
2719                 ASSERT(indexInOperands < maxRopeArguments);
2720                 operands[indexInOperands++] = toStringNodes[operandIdx];
2721             }
2722             set(VirtualRegister(currentInstruction[1].u.operand),
2723                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2724             NEXT_OPCODE(op_strcat);
2725         }
2726
2727         case op_less: {
2728             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2729             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2730             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2731             NEXT_OPCODE(op_less);
2732         }
2733
2734         case op_lesseq: {
2735             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2736             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2737             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
2738             NEXT_OPCODE(op_lesseq);
2739         }
2740
2741         case op_greater: {
2742             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2743             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2744             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
2745             NEXT_OPCODE(op_greater);
2746         }
2747
2748         case op_greatereq: {
2749             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2750             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2751             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
2752             NEXT_OPCODE(op_greatereq);
2753         }
2754
2755         case op_eq: {
2756             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2757             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2758             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
2759             NEXT_OPCODE(op_eq);
2760         }
2761
2762         case op_eq_null: {
2763             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2764             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
2765             NEXT_OPCODE(op_eq_null);
2766         }
2767
2768         case op_stricteq: {
2769             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2770             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2771             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
2772             NEXT_OPCODE(op_stricteq);
2773         }
2774
2775         case op_neq: {
2776             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2777             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2778             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2779             NEXT_OPCODE(op_neq);
2780         }
2781
2782         case op_neq_null: {
2783             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2784             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
2785             NEXT_OPCODE(op_neq_null);
2786         }
2787
2788         case op_nstricteq: {
2789             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2790             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2791             Node* invertedResult;
2792             invertedResult = addToGraph(CompareStrictEq, op1, op2);
2793             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
2794             NEXT_OPCODE(op_nstricteq);
2795         }
2796
2797         // === Property access operations ===
2798
2799         case op_get_by_val: {
2800             SpeculatedType prediction = getPredictionWithoutOSRExit();
2801             
2802             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2803             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
2804             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
2805             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2806             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
2807
2808             NEXT_OPCODE(op_get_by_val);
2809         }
2810
2811         case op_put_by_val_direct:
2812         case op_put_by_val: {
2813             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2814
2815             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
2816             
2817             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
2818             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2819             
2820             addVarArgChild(base);
2821             addVarArgChild(property);
2822             addVarArgChild(value);
2823             addVarArgChild(0); // Leave room for property storage.
2824             addVarArgChild(0); // Leave room for length.
2825             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2826
2827             NEXT_OPCODE(op_put_by_val);
2828         }
2829             
2830         case op_get_by_id:
2831         case op_get_by_id_out_of_line:
2832         case op_get_array_length: {
2833             SpeculatedType prediction = getPrediction();
2834             
2835             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2836             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2837             
2838             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
2839             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2840                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2841                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2842                 currentCodeOrigin(), uid);
2843             
2844             handleGetById(
2845                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2846
2847             NEXT_OPCODE(op_get_by_id);
2848         }
2849         case op_put_by_id:
2850         case op_put_by_id_out_of_line:
2851         case op_put_by_id_transition_direct:
2852         case op_put_by_id_transition_normal:
2853         case op_put_by_id_transition_direct_out_of_line:
2854         case op_put_by_id_transition_normal_out_of_line: {
2855             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2856             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2857             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2858             bool direct = currentInstruction[8].u.operand;
2859
2860             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2861                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2862                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2863                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
2864             
2865             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
2866             NEXT_OPCODE(op_put_by_id);
2867         }
2868
2869         case op_init_global_const_nop: {
2870             NEXT_OPCODE(op_init_global_const_nop);
2871         }
2872
2873         case op_init_global_const: {
2874             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2875             addToGraph(
2876                 PutGlobalVar,
2877                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2878                 value);
2879             NEXT_OPCODE(op_init_global_const);
2880         }
2881
2882         case op_profile_type: {
2883             Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
2884             addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
2885             NEXT_OPCODE(op_profile_type);
2886         }
2887
2888         case op_profile_control_flow: {
2889             BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
2890             addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
2891             NEXT_OPCODE(op_profile_control_flow);
2892         }
2893
2894         // === Block terminators. ===
2895
2896         case op_jmp: {
2897             int relativeOffset = currentInstruction[1].u.operand;
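            // A non-positive offset is a backward (or self) jump, i.e. a loop back-edge,
            // so flush locals before emitting the terminal.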
2898             if (relativeOffset <= 0)
2899                 flushForTerminal();
2900             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2901             LAST_OPCODE(op_jmp);
2902         }
2903
2904         case op_jtrue: {
2905             unsigned relativeOffset = currentInstruction[2].u.operand;
2906             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2907             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
2908             LAST_OPCODE(op_jtrue);
2909         }
2910
2911         case op_jfalse: {
2912             unsigned relativeOffset = currentInstruction[2].u.operand;
2913             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2914             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
2915             LAST_OPCODE(op_jfalse);
2916         }
2917
2918         case op_jeq_null: {
2919             unsigned relativeOffset = currentInstruction[2].u.operand;
2920             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2921             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2922             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
2923             LAST_OPCODE(op_jeq_null);
2924         }
2925
2926         case op_jneq_null: {
2927             unsigned relativeOffset = currentInstruction[2].u.operand;
2928             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2929             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2930             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
2931             LAST_OPCODE(op_jneq_null);
2932         }
2933
2934         case op_jless: {
2935             unsigned relativeOffset = currentInstruction[3].u.operand;
2936             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2937             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2938             Node* condition = addToGraph(CompareLess, op1, op2);
2939             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
2940             LAST_OPCODE(op_jless);
2941         }
2942
2943         case op_jlesseq: {
2944             unsigned relativeOffset = currentInstruction[3].u.operand;
2945             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2946             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2947             Node* condition = addToGraph(CompareLessEq, op1, op2);
2948             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
2949             LAST_OPCODE(op_jlesseq);
2950         }
2951
2952         case op_jgreater: {
2953             unsigned relativeOffset = currentInstruction[3].u.operand;
2954             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2955             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2956             Node* condition = addToGraph(CompareGreater, op1, op2);
2957             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
2958             LAST_OPCODE(op_jgreater);
2959         }
2960
2961         case op_jgreatereq: {
2962             unsigned relativeOffset = currentInstruction[3].u.operand;
2963             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2964             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2965             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2966             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
2967             LAST_OPCODE(op_jgreatereq);
2968         }
2969
2970         case op_jnless: {
2971             unsigned relativeOffset = currentInstruction[3].u.operand;
2972             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2973             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2974             Node* condition = addToGraph(CompareLess, op1, op2);
2975             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
2976             LAST_OPCODE(op_jnless);
2977         }
2978
2979         case op_jnlesseq: {
2980             unsigned relativeOffset = currentInstruction[3].u.operand;
2981             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2982             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2983             Node* condition = addToGraph(CompareLessEq, op1, op2);
2984             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
2985             LAST_OPCODE(op_jnlesseq);
2986         }
2987
2988         case op_jngreater: {
2989             unsigned relativeOffset = currentInstruction[3].u.operand;
2990             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2991             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2992             Node* condition = addToGraph(CompareGreater, op1, op2);
2993             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
2994             LAST_OPCODE(op_jngreater);
2995         }
2996
2997         case op_jngreatereq: {
2998             unsigned relativeOffset = currentInstruction[3].u.operand;
2999             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3000             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3001             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3002             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
3003             LAST_OPCODE(op_jngreatereq);
3004         }
3005             
3006         case op_switch_imm: {
3007             SwitchData& data = *m_graph.m_switchData.add();
3008             data.kind = SwitchImm;
3009             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3010             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3011             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
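            // Build the case list from the dense jump table: an offset of zero marks a hole,
            // and cases that would branch to the fall-through target are omitted since the
            // fall-through edge already covers them.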
3012             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3013                 if (!table.branchOffsets[i])
3014                     continue;
3015                 unsigned target = m_currentIndex + table.branchOffsets[i];
3016                 if (target == data.fallThrough.bytecodeIndex())
3017                     continue;
3018                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
3019             }
3020             flushIfTerminal(data);
3021             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3022             LAST_OPCODE(op_switch_imm);
3023         }
3024             
3025         case op_switch_char: {
3026             SwitchData& data = *m_graph.m_switchData.add();
3027             data.kind = SwitchChar;
3028             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3029             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3030             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3031             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3032                 if (!table.branchOffsets[i])
3033                     continue;
3034                 unsigned target = m_currentIndex + table.branchOffsets[i];
3035                 if (target == data.fallThrough.bytecodeIndex())
3036                     continue;
3037                 data.cases.append(
3038                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
3039             }
3040             flushIfTerminal(data);
3041             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3042             LAST_OPCODE(op_switch_char);
3043         }
3044
3045         case op_switch_string: {
3046             SwitchData& data = *m_graph.m_switchData.add();
3047             data.kind = SwitchString;
3048             data.switchTableIndex = currentInstruction[1].u.operand;
3049             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3050             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
3051             StringJumpTable::StringOffsetTable::iterator iter;
3052             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
3053             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
3054                 unsigned target = m_currentIndex + iter->value.branchOffset;
3055                 if (target == data.fallThrough.bytecodeIndex())
3056                     continue;
3057                 data.cases.append(
3058                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
3059             }
3060             flushIfTerminal(data);
3061             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3062             LAST_OPCODE(op_switch_string);
3063         }
3064
3065         case op_ret:
3066             flushForReturn();
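            // When returning from an inlined frame there is no Return node: the result is
            // stored into the caller's return-value register and the block is flagged for
            // return linking instead.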
3067             if (inlineCallFrame()) {
3068                 if (m_inlineStackTop->m_returnValue.isValid())
3069                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
3070                 m_inlineStackTop->m_didReturn = true;
3071                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
3072                     // If we're returning from the first block, then we're done parsing.
3073                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
3074                     shouldContinueParsing = false;
3075                     LAST_OPCODE(op_ret);
3076                 } else {
3077                     // If inlining created blocks, and we're doing a return, then we need some
3078                     // special linking.
3079                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3080                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
3081                 }
3082                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
3083                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
3084                     addToGraph(Jump, OpInfo(0));
3085                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
3086                     m_inlineStackTop->m_didEarlyReturn = true;
3087                 }
3088                 LAST_OPCODE(op_ret);
3089             }
3090             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3091             LAST_OPCODE(op_ret);
3092             
3093         case op_end:
3094             flushForReturn();
3095             ASSERT(!inlineCallFrame());
3096             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3097             LAST_OPCODE(op_end);
3098
3099         case op_throw:
3100             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
3101             flushForTerminal();
3102             addToGraph(Unreachable);
3103             LAST_OPCODE(op_throw);
3104             
3105         case op_throw_static_error:
3106             addToGraph(ThrowReferenceError);
3107             flushForTerminal();
3108             addToGraph(Unreachable);
3109             LAST_OPCODE(op_throw_static_error);
3110             
3111         case op_call:
3112             handleCall(currentInstruction, Call, CodeForCall);
3113             NEXT_OPCODE(op_call);
3114             
3115         case op_construct:
3116             handleCall(currentInstruction, Construct, CodeForConstruct);
3117             NEXT_OPCODE(op_construct);
3118             
3119         case op_call_varargs: {
3120             int result = currentInstruction[1].u.operand;
3121             int callee = currentInstruction[2].u.operand;
3122             int thisReg = currentInstruction[3].u.operand;
3123             int arguments = currentInstruction[4].u.operand;
3124             int firstFreeReg = currentInstruction[5].u.operand;
3125             
3126             ASSERT(inlineCallFrame());
3127             ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
3128             ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
3129
3130             addToGraph(CheckArgumentsNotCreated);
3131
3132             unsigned argCount = inlineCallFrame()->arguments.size();
3133             
3134             // Let's compute the register offset. We start with the last used register, and
3135             // then adjust for the things we want in the call frame.
3136             int registerOffset = firstFreeReg + 1;
3137             registerOffset -= argCount; // We will be passing some arguments.
3138             registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
3139             
3140             // Get the alignment right.
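            // registerOffset is negative, so negate it, round up to a multiple of
            // stackAlignmentRegisters(), and negate again; the effect is to round the
            // offset down to an aligned boundary.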
3141             registerOffset = -WTF::roundUpToMultipleOf(
3142                 stackAlignmentRegisters(),
3143                 -registerOffset);
3144
3145             ensureLocals(
3146                 m_inlineStackTop->remapOperand(
3147                     VirtualRegister(registerOffset)).toLocal());
3148             
3149             // The bytecode wouldn't have set up the arguments. But we'll do it and make it
3150             // look like the bytecode had done it.
3151             int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
3152             set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
3153             for (unsigned argument = 1; argument < argCount; ++argument)
3154                 set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
3155             
3156             handleCall(
3157                 result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
3158                 callee, argCount, registerOffset);
3159             NEXT_OPCODE(op_call_varargs);
3160         }
3161             
3162         case op_jneq_ptr:
3163             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
3164             // support simmer for a while before making it more general, since it's
3165             // already gnarly enough as it is.
3166             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
3167             addToGraph(
3168                 CheckCell,
3169                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
3170                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
3171                 get(VirtualRegister(currentInstruction[1].u.operand)));
3172             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
3173             LAST_OPCODE(op_jneq_ptr);
3174
3175         case op_resolve_scope: {
3176             int dst = currentInstruction[1].u.operand;
3177             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
3178             unsigned depth = currentInstruction[5].u.operand;
3179
3180             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
3181             if (needsVarInjectionChecks(resolveType))
3182                 addToGraph(VarInjectionWatchpoint);
3183
3184             switch (resolveType) {
3185             case GlobalProperty:
3186             case GlobalVar:
3187             case GlobalPropertyWithVarInjectionChecks:
3188             case GlobalVarWithVarInjectionChecks:
3189                 set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
3190                 if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks)
3191                     addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
3192                 break;
3193             case LocalClosureVar:
3194             case ClosureVar:
3195             case ClosureVarWithVarInjectionChecks: {
3196                 JSLexicalEnvironment* lexicalEnvironment = currentInstruction[6].u.lexicalEnvironment.get();
3197                 if (lexicalEnvironment
3198                     && lexicalEnvironment->symbolTable()->m_functionEnteredOnce.isStillValid()) {
3199                     addToGraph(FunctionReentryWatchpoint, OpInfo(lexicalEnvironment->symbolTable()));
3200                     addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
3201                     set(VirtualRegister(dst), weakJSConstant(lexicalEnvironment));
3202                     break;
3203                 }
3204                 set(VirtualRegister(dst), getScope(VirtualRegister(currentInstruction[2].u.operand), depth));
3205                 if (inlineCallFrame())
3206                     addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
3207                 break;
3208             }
3209             case Dynamic:
3210                 RELEASE_ASSERT_NOT_REACHED();
3211                 break;
3212             }
3213             NEXT_OPCODE(op_resolve_scope);
3214         }
3215
3216         case op_get_from_scope: {
3217             int dst = currentInstruction[1].u.operand;
3218             int scope = currentInstruction[2].u.operand;
3219             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3220             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
3221             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3222
3223             Structure* structure = 0;
3224             WatchpointSet* watchpoints = 0;
3225             uintptr_t operand;
3226             {
3227                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3228                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
3229                     watchpoints = currentInstruction[5].u.watchpointSet;
3230                 else
3231                     structure = currentInstruction[5].u.structure.get();
3232                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3233             }
3234
3235             UNUSED_PARAM(watchpoints); // We will use this in the future. For now it simply documents that, in GlobalVar mode, index 5 holds the watchpoint set.
3236
3237             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3238
3239             switch (resolveType) {
3240             case GlobalProperty:
3241             case GlobalPropertyWithVarInjectionChecks: {
3242                 SpeculatedType prediction = getPrediction();
3243                 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
3244                 if (status.state() != GetByIdStatus::Simple
3245                     || status.numVariants() != 1
3246                     || status[0].structureSet().size() != 1) {
3247                     set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
3248                     break;
3249                 }
3250                 Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
3251                 addToGraph(Phantom, get(VirtualRegister(scope)));
3252                 set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
3253                 break;
3254             }
3255             case GlobalVar:
3256             case GlobalVarWithVarInjectionChecks: {
3257                 addToGraph(Phantom, get(VirtualRegister(scope)));
3258                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
3259                 VariableWatchpointSet* watchpointSet = entry.watchpointSet();
3260                 JSValue inferredValue =
3261                     watchpointSet ? watchpointSet->inferredValue() : JSValue();
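                // If the variable's watchpoint set carries an inferred value, register a
                // VariableWatchpoint (so the compiled code is invalidated if the variable is
                // later written) and fold the load to that constant; otherwise emit a plain
                // GetGlobalVar.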
3262                 if (!inferredValue) {
3263                     SpeculatedType prediction = getPrediction();
3264                     set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
3265                     break;
3266                 }
3267                 
3268                 addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
3269                 set(VirtualRegister(dst), weakJSConstant(inferredValue));
3270                 break;
3271             }
3272             case LocalClosureVar:
3273             case ClosureVar:
3274             case ClosureVarWithVarInjectionChecks: {
3275                 Node* scopeNode = get(VirtualRegister(scope));
3276                 if (JSLexicalEnvironment* lexicalEnvironment = m_graph.tryGetActivation(scopeNode)) {
3277                     SymbolTable* symbolTable = lexicalEnvironment->symbolTable();
3278                     ConcurrentJITLocker locker(symbolTable->m_lock);
3279                     SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
3280                     ASSERT(iter != symbolTable->end(locker));
3281                     VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
3282                     if (watchpointSet) {
3283                         if (JSValue value = watchpointSet->inferredValue()) {
3284                             addToGraph(Phantom, scopeNode);
3285                             addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
3286                             set(VirtualRegister(dst), weakJSConstant(value));
3287                             break;
3288                         }
3289                     }
3290                 }
3291                 SpeculatedType prediction = getPrediction();
3292                 set(VirtualRegister(dst),
3293                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), 
3294                         addToGraph(GetClosureRegisters, scopeNode)));
3295                 break;
3296             }
3297             case Dynamic:
3298                 RELEASE_ASSERT_NOT_REACHED();
3299                 break;
3300             }
3301             NEXT_OPCODE(op_get_from_scope);
3302         }
3303
3304         case op_put_to_scope: {
3305             unsigned scope = currentInstruction[1].u.operand;
3306             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3307             unsigned value = currentInstruction[3].u.operand;
3308             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3309             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
3310
3311             Structure* structure = 0;
3312             VariableWatchpointSet* watchpoints = 0;
3313             uintptr_t operand;
3314             {
3315                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3316                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
3317                     watchpoints = currentInstruction[5].u.watchpointSet;
3318                 else
3319                     structure = currentInstruction[5].u.structure.get();
3320                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3321             }
3322
3323             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3324
3325             switch (resolveType) {
3326             case GlobalProperty:
3327             case GlobalPropertyWithVarInjectionChecks: {
3328                 PutByIdStatus status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
3329                 if (status.numVariants() != 1
3330                     || status[0].kind() != PutByIdVariant::Replace
3331                     || status[0].structure().size() != 1) {
3332                     addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
3333                     break;
3334                 }
3335                 ASSERT(status[0].structure().onlyStructure() == structure);
3336                 Node* base = cellConstantWithStructureCheck(globalObject, structure);
3337                 addToGraph(Phantom, get(VirtualRegister(scope)));
3338                 handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
3339                 // Keep scope alive until after put.
3340                 addToGraph(Phantom, get(VirtualRegister(scope)));
3341                 break;
3342             }
3343             case GlobalVar:
3344             case GlobalVarWithVarInjectionChecks: {
3345                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
3346                 ASSERT(watchpoints == entry.watchpointSet());
3347                 Node* valueNode = get(VirtualRegister(value));
3348                 addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
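                // Writes to a watched global must notify the watchpoint set; the notification
                // is skipped only when the set is already invalidated.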
3349                 if (watchpoints->state() != IsInvalidated)
3350                     addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
3351                 // Keep scope alive until after put.
3352                 addToGraph(Phantom, get(VirtualRegister(scope)));
3353                 break;
3354             }
3355             case LocalClosureVar:
3356             case ClosureVar:
3357             case ClosureVarWithVarInjectionChecks: {
3358                 Node* scopeNode = get(VirtualRegister(scope));
3359                 Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
3360                 Node* valueNode = get(VirtualRegister(value));
3361
3362                 if (watchpoints && watchpoints->state() != IsInvalidated)
3363                     addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
3364
3365                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, valueNode);
3366                 break;
3367             }
3368             case Dynamic:
3369                 RELEASE_ASSERT_NOT_REACHED();
3370                 break;
3371             }
3372             NEXT_OPCODE(op_put_to_scope);
3373         }
3374
3375         case op_loop_hint: {
3376             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
3377             // OSR can only happen at basic block boundaries. Assert that these two statements
3378             // are compatible.
3379             RELEASE_ASSERT(m_currentIndex == blockBegin);
3380             
3381             // We never do OSR into an inlined code block. That could not happen, since OSR
3382             // looks up the code block that is the replacement for the baseline JIT code
3383             // block. Hence, machine code block = true code block = not inline code block.
3384             if (!m_inlineStackTop->m_caller)
3385                 m_currentBlock->isOSRTarget = true;
3386
3387             addToGraph(LoopHint);
3388             
3389             if (m_vm->watchdog && m_vm->watchdog->isEnabled())
3390                 addToGraph(CheckWatchdogTimer);
3391             
3392             NEXT_OPCODE(op_loop_hint);
3393         }
3394             
3395         case op_init_lazy_reg: {
3396             set(VirtualRegister(currentInstruction[1].u.operand), jsConstant(JSValue()));
3397             ASSERT(operandIsLocal(currentInstruction[1].u.operand));
3398             m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
3399             NEXT_OPCODE(op_init_lazy_reg);
3400         }
3401             
3402         case op_create_lexical_environment: {
3403             Node* lexicalEnvironment = addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand)));
3404             set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
3405             set(VirtualRegister(currentInstruction[2].u.operand), lexicalEnvironment);
3406             NEXT_OPCODE(op_create_lexical_environment);
3407         }
3408             
3409         case op_get_scope: {
3410             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetScope, get(VirtualRegister(JSStack::Callee))));
3411             NEXT_OPCODE(op_get_scope);
3412         }
3413             
3414         case op_create_arguments: {
3415             m_graph.m_hasArguments = true;
3416             Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
3417             set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
3418             set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
3419             NEXT_OPCODE(op_create_arguments);
3420         }
3421
3422         case op_tear_off_arguments: {
3423             m_graph.m_hasArguments = true;
3424             addToGraph(TearOffArguments, get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand)));
3425             NEXT_OPCODE(op_tear_off_arguments);
3426         }
3427             
3428         case op_get_arguments_length: {
3429             m_graph.m_hasArguments = true;
3430             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
3431             NEXT_OPCODE(op_get_arguments_length);
3432         }
3433             
3434         case op_get_argument_by_val: {
3435             m_graph.m_hasArguments = true;
3436             set(VirtualRegister(currentInstruction[1].u.operand),
3437                 addToGraph(
3438                     GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
3439                     get(VirtualRegister(currentInstruction[3].u.operand))));
3440             NEXT_OPCODE(op_get_argument_by_val);
3441         }
3442             
3443         case op_new_func: {
3444             if (!currentInstruction[4].u.operand) {
3445                 set(VirtualRegister(currentInstruction[1].u.operand),
3446                     addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[3].u.operand), get(VirtualRegister(currentInstruction[2].u.operand))));
3447             } else {
3448                 set(VirtualRegister(currentInstruction[1].u.operand),
3449                     addToGraph(
3450                         NewFunction,
3451                         OpInfo(currentInstruction[3].u.operand),
3452                         get(VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand))));
3453             }
3454             NEXT_OPCODE(op_new_func);
3455         }
3456
3457         case op_new_func_exp: {
3458             set(VirtualRegister(currentInstruction[1].u.operand),
3459                 addToGraph(NewFunctionExpression, OpInfo(currentInstruction[3].u.operand), get(VirtualRegister(currentInstruction[2].u.operand))));
3460             NEXT_OPCODE(op_new_func_exp);
3461         }
3462
3463         case op_typeof: {
3464             set(VirtualRegister(currentInstruction[1].u.operand),
3465                 addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
3466             NEXT_OPCODE(op_typeof);
3467         }
3468
3469         case op_to_number: {
3470             Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
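            // Speculate that the operand is already a number: the Phantom's NumberUse edge
            // inserts the type check, and the original value is forwarded as the result.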
3471             addToGraph(Phantom, Edge(node, NumberUse));
3472             set(VirtualRegister(currentInstruction[1].u.operand), node);
3473             NEXT_OPCODE(op_to_number);
3474         }
3475             
3476         case op_in: {
3477             set(VirtualRegister(currentInstruction[1].u.operand),
3478                 addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
3479             NEXT_OPCODE(op_in);
3480         }
3481
3482         case op_get_enumerable_length: {
3483             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength, 
3484                 get(VirtualRegister(currentInstruction[2].u.operand))));
3485             NEXT_OPCODE(op_get_enumerable_length);
3486         }
3487
3488         case op_has_generic_property: {
3489             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, 
3490                 get(VirtualRegister(currentInstruction[2].u.operand)),
3491                 get(VirtualRegister(currentInstruction[3].u.operand))));
3492             NEXT_OPCODE(op_has_generic_property);
3493         }
3494
3495         case op_has_structure_property: {
3496             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, 
3497                 get(VirtualRegister(currentInstruction[2].u.operand)),
3498                 get(VirtualRegister(currentInstruction[3].u.operand)),
3499                 get(VirtualRegister(currentInstruction[4].u.operand))));
3500             NEXT_OPCODE(op_has_structure_property);
3501         }
3502
3503         case op_has_indexed_property: {
3504             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3505             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
3506             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3507             Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
3508             set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
3509             NEXT_OPCODE(op_has_indexed_property);
3510         }
3511
3512         case op_get_direct_pname: {
3513             SpeculatedType prediction = getPredictionWithoutOSRExit();
3514             
3515             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3516             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3517             Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
3518             Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));
3519
3520             addVarArgChild(base);
3521             addVarArgChild(property);
3522             addVarArgChild(index);
3523             addVarArgChild(enumerator);
3524             set(VirtualRegister(currentInstruction[1].u.operand), 
3525                 addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));
3526
3527             NEXT_OPCODE(op_get_direct_pname);
3528         }
3529
3530         case op_get_structure_property_enumerator: {
3531             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetStructurePropertyEnumerator, 
3532                 get(VirtualRegister(currentInstruction[2].u.operand)),
3533                 get(VirtualRegister(currentInstruction[3].u.operand))));
3534             NEXT_OPCODE(op_get_structure_property_enumerator);
3535         }
3536
3537         case op_get_generic_property_enumerator: {
3538             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetGenericPropertyEnumerator, 
3539                 get(VirtualRegister(currentInstruction[2].u.operand)),
3540                 get(VirtualRegister(currentInstruction[3].u.operand)),
3541                 get(VirtualRegister(currentInstruction[4].u.operand))));
3542             NEXT_OPCODE(op_get_generic_property_enumerator);
3543         }
3544
3545         case op_next_enumerator_pname: {
3546             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorPname, 
3547                 get(VirtualRegister(currentInstruction[2].u.operand)),
3548                 get(VirtualRegister(currentInstruction[3].u.operand))));
3549             NEXT_OPCODE(op_next_enumerator_pname);
3550         }
3551
3552         case op_to_index_string: {
3553             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString, 
3554                 get(VirtualRegister(currentInstruction[2].u.operand))));
3555             NEXT_OPCODE(op_to_index_string);
3556         }
3557
3558         default:
3559             // Parse failed! This should not happen because the capabilities checker
3560             // should have caught it.
3561             RELEASE_ASSERT_NOT_REACHED();
3562             return false;
3563         }
3564     }
3565 }
3566
3567 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
3568 {
3569     ASSERT(!block->isLinked);
3570     ASSERT(!block->isEmpty());
3571     Node* node = block->last();
3572     ASSERT(node->isTerminal());
3573     
3574     switch (node->op()) {
3575     case Jump:
3576         node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
3577         break;
3578         
3579     case Branch: {
3580         BranchData* data = node->branchData();
3581         data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
3582         data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
3583         break;
3584     }
3585         
3586     case Switch: {
3587         SwitchData* data = node->switchData();
3588         for (unsigned i = node->switchData()->cases.size(); i--;)
3589             data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
3590         data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
3591         break;
3592     }
3593         
3594     default: