1  /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "BasicBlockLocation.h"
33 #include "CallLinkStatus.h"
34 #include "CodeBlock.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGArrayMode.h"
37 #include "DFGCapabilities.h"
38 #include "DFGJITCode.h"
39 #include "GetByIdStatus.h"
40 #include "Heap.h"
41 #include "JSLexicalEnvironment.h"
42 #include "JSCInlines.h"
43 #include "PreciseJumpTargets.h"
44 #include "PutByIdStatus.h"
45 #include "StackAlignment.h"
46 #include "StringConstructor.h"
47 #include <wtf/CommaPrinter.h>
48 #include <wtf/HashMap.h>
49 #include <wtf/MathExtras.h>
50 #include <wtf/StdLibExtras.h>
51
52 namespace JSC { namespace DFG {
53
54 static const bool verbose = false;
55
56 class ConstantBufferKey {
57 public:
58     ConstantBufferKey()
59         : m_codeBlock(0)
60         , m_index(0)
61     {
62     }
63     
64     ConstantBufferKey(WTF::HashTableDeletedValueType)
65         : m_codeBlock(0)
66         , m_index(1)
67     {
68     }
69     
70     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
71         : m_codeBlock(codeBlock)
72         , m_index(index)
73     {
74     }
75     
76     bool operator==(const ConstantBufferKey& other) const
77     {
78         return m_codeBlock == other.m_codeBlock
79             && m_index == other.m_index;
80     }
81     
82     unsigned hash() const
83     {
84         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
85     }
86     
87     bool isHashTableDeletedValue() const
88     {
89         return !m_codeBlock && m_index;
90     }
91     
92     CodeBlock* codeBlock() const { return m_codeBlock; }
93     unsigned index() const { return m_index; }
94     
95 private:
96     CodeBlock* m_codeBlock;
97     unsigned m_index;
98 };
99
100 struct ConstantBufferKeyHash {
101     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
102     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
103     {
104         return a == b;
105     }
106     
107     static const bool safeToCompareToEmptyOrDeleted = true;
108 };
109
110 } } // namespace JSC::DFG
111
112 namespace WTF {
113
114 template<typename T> struct DefaultHash;
115 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
116     typedef JSC::DFG::ConstantBufferKeyHash Hash;
117 };
118
119 template<typename T> struct HashTraits;
120 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
121
122 } // namespace WTF
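// Note: the DefaultHash/HashTraits specializations above are what let ConstantBufferKey be used
// directly as a WTF::HashMap key. Illustrative sketch only, mirroring the m_constantBufferCache
// member declared further down in ByteCodeParser (codeBlock, bufferIndex and remappedIndex are
// placeholder names):
//
//     HashMap<ConstantBufferKey, unsigned> cache;
//     cache.add(ConstantBufferKey(codeBlock, bufferIndex), remappedIndex);
//     unsigned remapped = cache.get(ConstantBufferKey(codeBlock, bufferIndex));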
123
124 namespace JSC { namespace DFG {
125
126 // === ByteCodeParser ===
127 //
128 // This class is used to compile the dataflow graph from a CodeBlock.
129 class ByteCodeParser {
130 public:
131     ByteCodeParser(Graph& graph)
132         : m_vm(&graph.m_vm)
133         , m_codeBlock(graph.m_codeBlock)
134         , m_profiledBlock(graph.m_profiledBlock)
135         , m_graph(graph)
136         , m_currentBlock(0)
137         , m_currentIndex(0)
138         , m_constantUndefined(graph.freeze(jsUndefined()))
139         , m_constantNull(graph.freeze(jsNull()))
140         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
141         , m_constantOne(graph.freeze(jsNumber(1)))
142         , m_numArguments(m_codeBlock->numParameters())
143         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
144         , m_parameterSlots(0)
145         , m_numPassedVarArgs(0)
146         , m_inlineStackTop(0)
147         , m_haveBuiltOperandMaps(false)
148         , m_currentInstruction(0)
149     {
150         ASSERT(m_profiledBlock);
151     }
152     
153     // Parse a full CodeBlock of bytecode.
154     bool parse();
155     
156 private:
157     struct InlineStackEntry;
158
159     // Just parse from m_currentIndex to the end of the current CodeBlock.
160     void parseCodeBlock();
161     
162     void ensureLocals(unsigned newNumLocals)
163     {
164         if (newNumLocals <= m_numLocals)
165             return;
166         m_numLocals = newNumLocals;
167         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
168             m_graph.block(i)->ensureLocals(newNumLocals);
169     }
170
171     // Helper for min and max.
172     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
173     
174     // Handle calls. This resolves issues surrounding inlining and intrinsics.
175     void handleCall(
176         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
177         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
178         SpeculatedType prediction);
179     void handleCall(
180         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
181         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
182     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
183     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
184     void emitFunctionChecks(CallVariant, Node* callTarget, int registerOffset, CodeSpecializationKind);
185     void undoFunctionChecks(CallVariant);
186     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
187     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
188     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
189     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
190     enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
191     bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance);
192     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability);
193     void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
194     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
195     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
196     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
197     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
198     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
199     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
200     void handleGetById(
201         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
202         const GetByIdStatus&);
203     void emitPutById(
204         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
205     void handlePutById(
206         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
207         bool isDirect);
208     void emitChecks(const ConstantStructureCheckVector&);
209
210     Node* getScope(VirtualRegister scopeChain, unsigned skipCount);
211     
212     void prepareToParseBlock();
213     void clearCaches();
214
215     // Parse a single basic block of bytecode instructions.
216     bool parseBlock(unsigned limit);
217     // Link block successors.
218     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
219     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
220     
221     VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
222     {
223         ASSERT(!operand.isConstant());
224         
225         m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
226         return &m_graph.m_variableAccessData.last();
227     }
228     
229     // Get/Set the operands/result of a bytecode instruction.
230     Node* getDirect(VirtualRegister operand)
231     {
232         ASSERT(!operand.isConstant());
233
234         // Is this an argument?
235         if (operand.isArgument())
236             return getArgument(operand);
237
238         // Must be a local.
239         return getLocal(operand);
240     }
241
242     Node* get(VirtualRegister operand)
243     {
244         if (operand.isConstant()) {
245             unsigned constantIndex = operand.toConstantIndex();
246             unsigned oldSize = m_constants.size();
247             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
248                 JSValue value = m_inlineStackTop->m_codeBlock->getConstant(operand.offset());
249                 if (constantIndex >= oldSize) {
250                     m_constants.grow(constantIndex + 1);
251                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
252                         m_constants[i] = nullptr;
253                 }
254                 m_constants[constantIndex] =
255                     addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
256             }
257             ASSERT(m_constants[constantIndex]);
258             return m_constants[constantIndex];
259         }
260         
261         if (inlineCallFrame()) {
262             if (!inlineCallFrame()->isClosureCall) {
263                 JSFunction* callee = inlineCallFrame()->calleeConstant();
264                 if (operand.offset() == JSStack::Callee)
265                     return weakJSConstant(callee);
266                 if (operand == m_inlineStackTop->m_codeBlock->scopeRegister())
267                     return weakJSConstant(callee->scope());
268             }
269         } else if (operand.offset() == JSStack::Callee)
270             return addToGraph(GetCallee);
271         
272         return getDirect(m_inlineStackTop->remapOperand(operand));
273     }
274     
275     enum SetMode {
276         // A normal set which follows a two-phase commit that spans code origins. During
277         // the current code origin it issues a MovHint, and at the start of the next
278         // code origin there will be a SetLocal. If the local needs flushing, the second
279         // SetLocal will be preceded with a Flush.
280         NormalSet,
281         
282         // A set where the SetLocal happens immediately and there is still a Flush. This
283         // is relevant when assigning to a local in tricky situations for the delayed
284         // SetLocal logic but where we know that we have not performed any side effects
285         // within this code origin. This is a safe replacement for NormalSet anytime we
286         // know that we have not yet performed side effects in this code origin.
287         ImmediateSetWithFlush,
288         
289         // A set where the SetLocal happens immediately and we do not Flush it even if
290         // this is a local that is marked as needing it. This is relevant when
291         // initializing locals at the top of a function.
292         ImmediateNakedSet
293     };
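    // Illustrative sketch of the three modes (hedged; see setDirect() and setLocal() below,
    // dst and value are placeholders):
    //
    //     set(dst, value);                        // NormalSet: MovHint now; the SetLocal is queued
    //                                             // and emitted by processSetLocalQueue() at the
    //                                             // start of the next code origin, preceded by a
    //                                             // Flush if the local needs flushing.
    //     set(dst, value, ImmediateSetWithFlush); // SetLocal right away, still flushed if needed.
    //     set(dst, value, ImmediateNakedSet);     // SetLocal right away, never preceded by a Flush.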
294     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
295     {
296         addToGraph(MovHint, OpInfo(operand.offset()), value);
297         
298         DelayedSetLocal delayed = DelayedSetLocal(operand, value);
299         
300         if (setMode == NormalSet) {
301             m_setLocalQueue.append(delayed);
302             return 0;
303         }
304         
305         return delayed.execute(this, setMode);
306     }
307     
308     void processSetLocalQueue()
309     {
310         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
311             m_setLocalQueue[i].execute(this);
312         m_setLocalQueue.resize(0);
313     }
314
315     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
316     {
317         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
318     }
319     
320     Node* injectLazyOperandSpeculation(Node* node)
321     {
322         ASSERT(node->op() == GetLocal);
323         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
324         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
325         LazyOperandValueProfileKey key(m_currentIndex, node->local());
326         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
327         node->variableAccessData()->predict(prediction);
328         return node;
329     }
330
331     // Used in implementing get/set, above, where the operand is a local variable.
332     Node* getLocal(VirtualRegister operand)
333     {
334         unsigned local = operand.toLocal();
335
336         if (local < m_localWatchpoints.size()) {
337             if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
338                 if (JSValue value = set->inferredValue()) {
339                     addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
340                     addToGraph(VariableWatchpoint, OpInfo(set));
341                     return weakJSConstant(value);
342                 }
343             }
344         }
345
346         Node* node = m_currentBlock->variablesAtTail.local(local);
347         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
348         
349         // This has two goals: 1) link together variable access datas, and 2)
350         // try to avoid creating redundant GetLocals. (1) is required for
351         // correctness - no other phase will ensure that block-local variable
352         // access data unification is done correctly. (2) is purely opportunistic
353     // and is meant as a compile-time optimization only.
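        // For example (sketch, uncaptured local): within a single basic block,
        //
        //     Node* a = getLocal(operand);
        //     Node* b = getLocal(operand);
        //
        // yields a == b, because the second call finds the first GetLocal in variablesAtTail and
        // returns it; similarly, a getLocal() right after a setLocal() of the same operand simply
        // returns the node for the stored value.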
354         
355         VariableAccessData* variable;
356         
357         if (node) {
358             variable = node->variableAccessData();
359             variable->mergeIsCaptured(isCaptured);
360             
361             if (!isCaptured) {
362                 switch (node->op()) {
363                 case GetLocal:
364                     return node;
365                 case SetLocal:
366                     return node->child1().node();
367                 default:
368                     break;
369                 }
370             }
371         } else
372             variable = newVariableAccessData(operand, isCaptured);
373         
374         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
375         m_currentBlock->variablesAtTail.local(local) = node;
376         return node;
377     }
378
379     Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
380     {
381         unsigned local = operand.toLocal();
382         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
383         
384         if (setMode != ImmediateNakedSet) {
385             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
386             if (isCaptured || argumentPosition)
387                 flushDirect(operand, argumentPosition);
388         }
389
390         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
391         variableAccessData->mergeStructureCheckHoistingFailed(
392             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
393         variableAccessData->mergeCheckArrayHoistingFailed(
394             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
395         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
396         m_currentBlock->variablesAtTail.local(local) = node;
397         return node;
398     }
399
400     // Used in implementing get/set, above, where the operand is an argument.
401     Node* getArgument(VirtualRegister operand)
402     {
403         unsigned argument = operand.toArgument();
404         ASSERT(argument < m_numArguments);
405         
406         Node* node = m_currentBlock->variablesAtTail.argument(argument);
407         bool isCaptured = m_codeBlock->isCaptured(operand);
408
409         VariableAccessData* variable;
410         
411         if (node) {
412             variable = node->variableAccessData();
413             variable->mergeIsCaptured(isCaptured);
414             
415             if (!isCaptured) {
416                 switch (node->op()) {
417                 case GetLocal:
418                     return node;
419                 case SetLocal:
420                     return node->child1().node();
421                 default:
422                     break;
423                 }
424             }
425         } else
426             variable = newVariableAccessData(operand, isCaptured);
427         
428         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
429         m_currentBlock->variablesAtTail.argument(argument) = node;
430         return node;
431     }
432     Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
433     {
434         unsigned argument = operand.toArgument();
435         ASSERT(argument < m_numArguments);
436         
437         bool isCaptured = m_codeBlock->isCaptured(operand);
438
439         VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
440
441         // Always flush arguments, except for 'this'. If 'this' is created by us,
442         // then make sure that it's never unboxed.
443         if (argument) {
444             if (setMode != ImmediateNakedSet)
445                 flushDirect(operand);
446         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
447             variableAccessData->mergeShouldNeverUnbox(true);
448         
449         variableAccessData->mergeStructureCheckHoistingFailed(
450             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
451         variableAccessData->mergeCheckArrayHoistingFailed(
452             m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
453         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
454         m_currentBlock->variablesAtTail.argument(argument) = node;
455         return node;
456     }
457     
458     ArgumentPosition* findArgumentPositionForArgument(int argument)
459     {
460         InlineStackEntry* stack = m_inlineStackTop;
461         while (stack->m_inlineCallFrame)
462             stack = stack->m_caller;
463         return stack->m_argumentPositions[argument];
464     }
465     
466     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
467     {
468         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
469             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
470             if (!inlineCallFrame)
471                 break;
472             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
473                 continue;
474             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
475                 continue;
476             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
477                 continue;
478             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
479             return stack->m_argumentPositions[argument];
480         }
481         return 0;
482     }
483     
484     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
485     {
486         if (operand.isArgument())
487             return findArgumentPositionForArgument(operand.toArgument());
488         return findArgumentPositionForLocal(operand);
489     }
490
491     void flush(VirtualRegister operand)
492     {
493         flushDirect(m_inlineStackTop->remapOperand(operand));
494     }
495     
496     void flushDirect(VirtualRegister operand)
497     {
498         flushDirect(operand, findArgumentPosition(operand));
499     }
500     
501     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
502     {
503         bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
504         
505         ASSERT(!operand.isConstant());
506         
507         Node* node = m_currentBlock->variablesAtTail.operand(operand);
508         
509         VariableAccessData* variable;
510         
511         if (node) {
512             variable = node->variableAccessData();
513             variable->mergeIsCaptured(isCaptured);
514         } else
515             variable = newVariableAccessData(operand, isCaptured);
516         
517         node = addToGraph(Flush, OpInfo(variable));
518         m_currentBlock->variablesAtTail.operand(operand) = node;
519         if (argumentPosition)
520             argumentPosition->addVariable(variable);
521     }
522     
523     void flush(InlineStackEntry* inlineStackEntry)
524     {
525         int numArguments;
526         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
527             numArguments = inlineCallFrame->arguments.size();
528             if (inlineCallFrame->isClosureCall)
529                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
530         } else
531             numArguments = inlineStackEntry->m_codeBlock->numParameters();
532         for (unsigned argument = numArguments; argument-- > 1;)
533             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
534         for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
535             if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
536                 continue;
537             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
538         }
539     }
540
541     void flushForTerminal()
542     {
543         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
544             flush(inlineStackEntry);
545     }
546
547     void flushForReturn()
548     {
549         flush(m_inlineStackTop);
550     }
551     
552     void flushIfTerminal(SwitchData& data)
553     {
554         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
555             return;
556         
557         for (unsigned i = data.cases.size(); i--;) {
558             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
559                 return;
560         }
561         
562         flushForTerminal();
563     }
564
565     // Assumes that the constant should be strongly marked.
566     Node* jsConstant(JSValue constantValue)
567     {
568         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
569     }
570
571     Node* weakJSConstant(JSValue constantValue)
572     {
573         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
574     }
575
576     // Helper functions to get/set the this value.
577     Node* getThis()
578     {
579         return get(m_inlineStackTop->m_codeBlock->thisRegister());
580     }
581
582     void setThis(Node* value)
583     {
584         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
585     }
586
587     InlineCallFrame* inlineCallFrame()
588     {
589         return m_inlineStackTop->m_inlineCallFrame;
590     }
591
592     CodeOrigin currentCodeOrigin()
593     {
594         return CodeOrigin(m_currentIndex, inlineCallFrame());
595     }
596     
597     BranchData* branchData(unsigned taken, unsigned notTaken)
598     {
599         // We assume that branches originating from bytecode always have a fall-through. We
600         // use this assumption to avoid checking for the creation of terminal blocks.
601         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
602         BranchData* data = m_graph.m_branchData.add();
603         *data = BranchData::withBytecodeIndices(taken, notTaken);
604         return data;
605     }
606     
607     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
608     {
609         Node* result = m_graph.addNode(
610             SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2),
611             Edge(child3));
612         ASSERT(op != Phi);
613         m_currentBlock->append(result);
614         return result;
615     }
616     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
617     {
618         Node* result = m_graph.addNode(
619             SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3);
620         ASSERT(op != Phi);
621         m_currentBlock->append(result);
622         return result;
623     }
624     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
625     {
626         Node* result = m_graph.addNode(
627             SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2),
628             Edge(child3));
629         ASSERT(op != Phi);
630         m_currentBlock->append(result);
631         return result;
632     }
633     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
634     {
635         Node* result = m_graph.addNode(
636             SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2,
637             Edge(child1), Edge(child2), Edge(child3));
638         ASSERT(op != Phi);
639         m_currentBlock->append(result);
640         return result;
641     }
642     
643     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
644     {
645         Node* result = m_graph.addNode(
646             SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2,
647             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
648         ASSERT(op != Phi);
649         m_currentBlock->append(result);
650         
651         m_numPassedVarArgs = 0;
652         
653         return result;
654     }
655     
656     void removeLastNodeFromGraph(NodeType expectedNodeType)
657     {
658         Node* node = m_currentBlock->takeLast();
659         RELEASE_ASSERT(node->op() == expectedNodeType);
660         m_graph.m_allocator.free(node);
661     }
662
663     void addVarArgChild(Node* child)
664     {
665         m_graph.m_varArgChildren.append(Edge(child));
666         m_numPassedVarArgs++;
667     }
668     
669     Node* addCallWithoutSettingResult(
670         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
671         SpeculatedType prediction)
672     {
673         addVarArgChild(callee);
674         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
675         if (parameterSlots > m_parameterSlots)
676             m_parameterSlots = parameterSlots;
677
678         int dummyThisArgument = op == Call || op == NativeCall ? 0 : 1;
679         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
680             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
681
682         return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
683     }
684     
685     Node* addCall(
686         int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
687         SpeculatedType prediction)
688     {
689         Node* call = addCallWithoutSettingResult(
690             op, opInfo, callee, argCount, registerOffset, prediction);
691         VirtualRegister resultReg(result);
692         if (resultReg.isValid())
693             set(VirtualRegister(result), call);
694         return call;
695     }
696     
697     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
698     {
699         Node* objectNode = weakJSConstant(object);
700         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
701         return objectNode;
702     }
703     
704     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
705     {
706         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
707         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
708     }
709
710     SpeculatedType getPrediction(unsigned bytecodeIndex)
711     {
712         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
713         
714         if (prediction == SpecNone) {
715             // We have no information about what values this node generates. Give up
716             // on executing this code, since we're likely to do more damage than good.
717             addToGraph(ForceOSRExit);
718         }
719         
720         return prediction;
721     }
722     
723     SpeculatedType getPredictionWithoutOSRExit()
724     {
725         return getPredictionWithoutOSRExit(m_currentIndex);
726     }
727     
728     SpeculatedType getPrediction()
729     {
730         return getPrediction(m_currentIndex);
731     }
732     
733     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
734     {
735         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
736         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
737         return ArrayMode::fromObserved(locker, profile, action, false);
738     }
739     
740     ArrayMode getArrayMode(ArrayProfile* profile)
741     {
742         return getArrayMode(profile, Array::Read);
743     }
744     
745     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
746     {
747         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
748         
749         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
750         
751         bool makeSafe =
752             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
753             || profile->outOfBounds(locker);
754         
755         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
756         
757         return result;
758     }
759     
760     Node* makeSafe(Node* node)
761     {
762         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
763             node->mergeFlags(NodeMayOverflowInDFG);
764         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
765             node->mergeFlags(NodeMayNegZeroInDFG);
766         
767         if (!isX86() && node->op() == ArithMod)
768             return node;
769
770         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
771             return node;
772         
773         switch (node->op()) {
774         case UInt32ToNumber:
775         case ArithAdd:
776         case ArithSub:
777         case ValueAdd:
778         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
779             node->mergeFlags(NodeMayOverflowInBaseline);
780             break;
781             
782         case ArithNegate:
783             // Currently we can't tell the difference between a negation overflowing
784             // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
785             // path then we assume that it did both of those things.
786             node->mergeFlags(NodeMayOverflowInBaseline);
787             node->mergeFlags(NodeMayNegZeroInBaseline);
788             break;
789
790         case ArithMul:
791             // FIXME: We should detect cases where we only overflowed but never created
792             // negative zero.
793             // https://bugs.webkit.org/show_bug.cgi?id=132470
794             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
795                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
796                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
797             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
798                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
799                 node->mergeFlags(NodeMayNegZeroInBaseline);
800             break;
801             
802         default:
803             RELEASE_ASSERT_NOT_REACHED();
804             break;
805         }
806         
807         return node;
808     }
809     
810     Node* makeDivSafe(Node* node)
811     {
812         ASSERT(node->op() == ArithDiv);
813         
814         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
815             node->mergeFlags(NodeMayOverflowInDFG);
816         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
817             node->mergeFlags(NodeMayNegZeroInDFG);
818         
819         // The main slow case counter for op_div in the old JIT counts only when
820         // the operands are not numbers. We don't care about that since we already
821         // have speculations in place that take care of that separately. We only
822         // care about when the outcome of the division is not an integer, which
823         // is what the special fast case counter tells us.
824         
825         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
826             return node;
827         
828         // FIXME: It might be possible to make this more granular.
829         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
830         
831         return node;
832     }
833     
834     void buildOperandMapsIfNecessary();
835     
836     VM* m_vm;
837     CodeBlock* m_codeBlock;
838     CodeBlock* m_profiledBlock;
839     Graph& m_graph;
840
841     // The current block being generated.
842     BasicBlock* m_currentBlock;
843     // The bytecode index of the current instruction being generated.
844     unsigned m_currentIndex;
845
846     FrozenValue* m_constantUndefined;
847     FrozenValue* m_constantNull;
848     FrozenValue* m_constantNaN;
849     FrozenValue* m_constantOne;
850     Vector<Node*, 16> m_constants;
851
852     // The number of arguments passed to the function.
853     unsigned m_numArguments;
854     // The number of locals (vars + temporaries) used in the function.
855     unsigned m_numLocals;
856     // The number of slots (in units of sizeof(Register)) that we need to
857     // preallocate for arguments to outgoing calls from this frame. This
858     // number includes the CallFrame slots that we initialize for the callee
859     // (but not the callee-initialized CallerFrame and ReturnPC slots).
860     // This number is 0 if and only if this function is a leaf.
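    // As a concrete illustration (mirroring addCallWithoutSettingResult() below), a call with
    // argCount arguments requires at least
    //     JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount
    // slots, and m_parameterSlots is raised to the maximum such value seen so far.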
861     unsigned m_parameterSlots;
862     // The number of var args passed to the next var arg node.
863     unsigned m_numPassedVarArgs;
864
865     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
866     
867     Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
868     
869     struct InlineStackEntry {
870         ByteCodeParser* m_byteCodeParser;
871         
872         CodeBlock* m_codeBlock;
873         CodeBlock* m_profiledBlock;
874         InlineCallFrame* m_inlineCallFrame;
875         
876         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
877         
878         QueryableExitProfile m_exitProfile;
879         
880         // Remapping of identifier and constant numbers from the code block being
881         // inlined (inline callee) to the code block that we're inlining into
882         // (the machine code block, which is the transitive, though not necessarily
883         // direct, caller).
884         Vector<unsigned> m_identifierRemap;
885         Vector<unsigned> m_constantBufferRemap;
886         Vector<unsigned> m_switchRemap;
887         
888         // Blocks introduced by this code block, which need successor linking.
889         // May include up to one basic block that includes the continuation after
890         // the callsite in the caller. These must be appended in the order that they
891         // are created, but their bytecodeBegin values need not be in order as they
892         // are ignored.
893         Vector<UnlinkedBlock> m_unlinkedBlocks;
894         
895         // Potential block linking targets. Must be sorted by bytecodeBegin, and
896         // cannot have two blocks that have the same bytecodeBegin.
897         Vector<BasicBlock*> m_blockLinkingTargets;
898         
899         // If the callsite's basic block was split into two, then this will be
900         // the head of the callsite block. It needs its successors linked to the
901         // m_unlinkedBlocks, but not the other way around: there's no way for
902         // any blocks in m_unlinkedBlocks to jump back into this block.
903         BasicBlock* m_callsiteBlockHead;
904         
905         // Does the callsite block head need linking? This is typically true
906         // but will be false for the machine code block's inline stack entry
907         // (since that one is not inlined) and for cases where an inline callee
908         // did the linking for us.
909         bool m_callsiteBlockHeadNeedsLinking;
910         
911         VirtualRegister m_returnValue;
912         
913         // Speculations about variable types collected from the profiled code block,
914     // which are based on OSR exit profiles that past DFG compilations of this
915         // code block had gathered.
916         LazyOperandValueProfileParser m_lazyOperands;
917         
918         CallLinkInfoMap m_callLinkInfos;
919         StubInfoMap m_stubInfos;
920         
921         // Did we see any returns? We need to handle the (uncommon but necessary)
922         // case where a procedure that does not return was inlined.
923         bool m_didReturn;
924         
925         // Did we have any early returns?
926         bool m_didEarlyReturn;
927         
928         // Pointers to the argument position trackers for this slice of code.
929         Vector<ArgumentPosition*> m_argumentPositions;
930         
931         InlineStackEntry* m_caller;
932         
933         InlineStackEntry(
934             ByteCodeParser*,
935             CodeBlock*,
936             CodeBlock* profiledBlock,
937             BasicBlock* callsiteBlockHead,
938             JSFunction* callee, // Null if this is a closure call.
939             VirtualRegister returnValueVR,
940             VirtualRegister inlineCallFrameStart,
941             int argumentCountIncludingThis,
942             InlineCallFrame::Kind);
943         
944         ~InlineStackEntry()
945         {
946             m_byteCodeParser->m_inlineStackTop = m_caller;
947         }
948         
949         VirtualRegister remapOperand(VirtualRegister operand) const
950         {
951             if (!m_inlineCallFrame)
952                 return operand;
953             
954             ASSERT(!operand.isConstant());
955
956             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
957         }
958     };
959     
960     InlineStackEntry* m_inlineStackTop;
961     
962     struct DelayedSetLocal {
963         VirtualRegister m_operand;
964         Node* m_value;
965         
966         DelayedSetLocal() { }
967         DelayedSetLocal(VirtualRegister operand, Node* value)
968             : m_operand(operand)
969             , m_value(value)
970         {
971         }
972         
973         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
974         {
975             if (m_operand.isArgument())
976                 return parser->setArgument(m_operand, m_value, setMode);
977             return parser->setLocal(m_operand, m_value, setMode);
978         }
979     };
980     
981     Vector<DelayedSetLocal, 2> m_setLocalQueue;
982
983     // Have we built operand maps? We initialize them lazily, and only when doing
984     // inlining.
985     bool m_haveBuiltOperandMaps;
986     // Mapping between identifier names and numbers.
987     BorrowedIdentifierMap m_identifierMap;
988     
989     CodeBlock* m_dfgCodeBlock;
990     CallLinkStatus::ContextMap m_callContextMap;
991     StubInfoMap m_dfgStubInfos;
992     
993     Instruction* m_currentInstruction;
994 };
995
996 #define NEXT_OPCODE(name) \
997     m_currentIndex += OPCODE_LENGTH(name); \
998     continue
999
1000 #define LAST_OPCODE(name) \
1001     m_currentIndex += OPCODE_LENGTH(name); \
1002     return shouldContinueParsing
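// Both macros are used from the interpreter-style dispatch loop in parseBlock(). A sketch of a
// typical non-terminal case (hypothetical; the real cases live in parseBlock(), outside this
// excerpt, and currentInstruction is a placeholder name):
//
//     case op_mov: {
//         Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
//         set(VirtualRegister(currentInstruction[1].u.operand), op);
//         NEXT_OPCODE(op_mov);
//     }
//
// Terminal opcodes end their case with LAST_OPCODE instead, so parsing of the block stops there.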
1003
1004 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1005 {
1006     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1007     handleCall(
1008         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1009         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
1010 }
1011
1012 void ByteCodeParser::handleCall(
1013     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1014     int callee, int argumentCountIncludingThis, int registerOffset)
1015 {
1016     Node* callTarget = get(VirtualRegister(callee));
1017     
1018     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1019         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1020         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1021     
1022     handleCall(
1023         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1024         argumentCountIncludingThis, registerOffset, callLinkStatus);
1025 }
1026     
1027 void ByteCodeParser::handleCall(
1028     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1029     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1030     CallLinkStatus callLinkStatus)
1031 {
1032     handleCall(
1033         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1034         registerOffset, callLinkStatus, getPrediction());
1035 }
1036
1037 void ByteCodeParser::handleCall(
1038     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1039     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1040     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1041 {
1042     ASSERT(registerOffset <= 0);
1043     
1044     if (callTarget->hasConstant())
1045         callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
1046     
1047     if (Options::verboseDFGByteCodeParsing())
1048         dataLog("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1049     
1050     if (!callLinkStatus.canOptimize()) {
1051         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1052         // that we cannot optimize them.
1053         
1054         addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1055         return;
1056     }
1057     
1058     unsigned nextOffset = m_currentIndex + instructionSize;
1059     
1060     OpInfo callOpInfo;
1061     
1062     if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1063         if (m_graph.compilation())
1064             m_graph.compilation()->noticeInlinedCall();
1065         return;
1066     }
1067     
1068 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1069     if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1070         CallVariant callee = callLinkStatus[0];
1071         JSFunction* function = callee.function();
1072         CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1073         if (function && function->isHostFunction()) {
1074             emitFunctionChecks(callee, callTarget, registerOffset, specializationKind);
1075             callOpInfo = OpInfo(m_graph.freeze(function));
1076
1077             if (op == Call)
1078                 op = NativeCall;
1079             else {
1080                 ASSERT(op == Construct);
1081                 op = NativeConstruct;
1082             }
1083         }
1084     }
1085 #endif
1086     
1087     addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1088 }
1089
1090 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
1091 {
1092     Node* thisArgument;
1093     if (kind == CodeForCall)
1094         thisArgument = get(virtualRegisterForArgument(0, registerOffset));
1095     else
1096         thisArgument = 0;
1097
1098     JSCell* calleeCell;
1099     Node* callTargetForCheck;
1100     if (callee.isClosureCall()) {
1101         calleeCell = callee.executable();
1102         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1103     } else {
1104         calleeCell = callee.nonExecutableCallee();
1105         callTargetForCheck = callTarget;
1106     }
1107     
1108     ASSERT(calleeCell);
1109     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1110 }
1111
1112 void ByteCodeParser::undoFunctionChecks(CallVariant callee)
1113 {
1114     removeLastNodeFromGraph(CheckCell);
1115     if (callee.isClosureCall())
1116         removeLastNodeFromGraph(GetExecutable);
1117 }
1118
1119 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
1120 {
1121     for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
1122         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1123 }
1124
1125 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1126 {
1127     if (verbose)
1128         dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1129     
1130     FunctionExecutable* executable = callee.functionExecutable();
1131     if (!executable) {
1132         if (verbose)
1133             dataLog("    Failing because there is no function executable.\n");
1134         return UINT_MAX;
1135     }
1136     
1137     // Does the number of arguments we're passing match the arity of the target? We currently
1138     // inline only if the number of arguments passed is greater than or equal to the number
1139     // of arguments expected.
1140     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1141         if (verbose)
1142             dataLog("    Failing because of arity mismatch.\n");
1143         return UINT_MAX;
1144     }
1145     
1146     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1147     // being an inline candidate? We might not have a code block if code was thrown away or if we
1148     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1149     // if we had a static proof of what was being called; this might happen for example if you call a
1150     // global function, where watchpointing gives us static information. Overall, it's a rare case
1151     // because we expect that any hot callees would have already been compiled.
1152     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1153     if (!codeBlock) {
1154         if (verbose)
1155             dataLog("    Failing because no code block available.\n");
1156         return UINT_MAX;
1157     }
1158     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1159         codeBlock, kind, callee.isClosureCall());
1160     if (!canInline(capabilityLevel)) {
1161         if (verbose)
1162             dataLog("    Failing because the function is not inlineable.\n");
1163         return UINT_MAX;
1164     }
1165     
1166     // Check if the caller is already too large. We do this check here because that's just
1167     // where we happen to also have the callee's code block, and we want that for the
1168     // purpose of unsetting SABI.
1169     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1170         codeBlock->m_shouldAlwaysBeInlined = false;
1171         if (verbose)
1172             dataLog("    Failing because the caller is too large.\n");
1173         return UINT_MAX;
1174     }
1175     
1176     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1177     // this function.
1178     // https://bugs.webkit.org/show_bug.cgi?id=127627
1179     
1180     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1181     // too many levels? If either of these are detected, then don't inline. We adjust our
1182     // too many levels? If either of these is detected, then don't inline. We adjust our
1183     
1184     unsigned depth = 0;
1185     unsigned recursion = 0;
1186     
1187     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1188         ++depth;
1189         if (depth >= Options::maximumInliningDepth()) {
1190             if (verbose)
1191                 dataLog("    Failing because depth exceeded.\n");
1192             return UINT_MAX;
1193         }
1194         
1195         if (entry->executable() == executable) {
1196             ++recursion;
1197             if (recursion >= Options::maximumInliningRecursion()) {
1198                 if (verbose)
1199                     dataLog("    Failing because recursion detected.\n");
1200                 return UINT_MAX;
1201             }
1202         }
1203     }
1204     
1205     if (verbose)
1206         dataLog("    Inlining should be possible.\n");
1207     
1208     // It might be possible to inline.
1209     return codeBlock->instructionCount();
1210 }
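// Note (sketch of the caller's contract): handleInlining()/attemptToInlineCall() treat a return
// value of UINT_MAX as "give up and plant a real call", and otherwise weigh the returned size
// against the remaining inlining budget (the inliningBalance reference parameter declared above).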
1211
1212 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability)
1213 {
1214     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1215     
1216     ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1217     
1218     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1219
1220     // FIXME: Don't flush constants!
1221     
1222     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1223     
1224     ensureLocals(
1225         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1226         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1227     
1228     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1229
1230     VirtualRegister resultReg(resultOperand);
1231     if (resultReg.isValid())
1232         resultReg = m_inlineStackTop->remapOperand(resultReg);
1233     
1234     InlineStackEntry inlineStackEntry(
1235         this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1236         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1237     
1238     // This is where the actual inlining really happens.
1239     unsigned oldIndex = m_currentIndex;
1240     m_currentIndex = 0;
1241
1242     InlineVariableData inlineVariableData;
1243     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1244     inlineVariableData.argumentPositionStart = argumentPositionStart;
1245     inlineVariableData.calleeVariable = 0;
1246     
1247     RELEASE_ASSERT(
1248         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1249         == callee.isClosureCall());
1250     if (callee.isClosureCall()) {
1251         VariableAccessData* calleeVariable =
1252             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1253         
1254         calleeVariable->mergeShouldNeverUnbox(true);
1255         
1256         inlineVariableData.calleeVariable = calleeVariable;
1257     }
1258     
1259     m_graph.m_inlineVariableData.append(inlineVariableData);
1260     
1261     parseCodeBlock();
1262     clearCaches(); // Reset our state now that we're back to the outer code.
1263     
1264     m_currentIndex = oldIndex;
1265     
1266     // If the inlined code created some new basic blocks, then we have linking to do.
1267     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1268         
1269         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1270         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1271             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1272         else
1273             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1274         
1275         if (callerLinkability == CallerDoesNormalLinking)
1276             cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1277         
1278         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1279     } else
1280         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1281     
1282     BasicBlock* lastBlock = m_graph.lastBlock();
1283     // If there was a return, but no early returns, then we're done. We allow parsing of
1284     // the caller to continue in whatever basic block we're in right now.
1285     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1286         ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
1287         
1288         // If we created new blocks then the last block needs linking, but in the
1289         // caller. It doesn't need to be linked to, but it needs outgoing links.
1290         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1291             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1292             // for release builds because this block will never serve as a potential target
1293             // in the linker's binary search.
1294             lastBlock->bytecodeBegin = m_currentIndex;
1295             if (callerLinkability == CallerDoesNormalLinking) {
1296                 if (verbose)
1297                     dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1298                 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1299             }
1300         }
1301         
1302         m_currentBlock = m_graph.lastBlock();
1303         return;
1304     }
1305     
1306     // If we get to this point then all blocks must end in some sort of terminal.
1307     ASSERT(lastBlock->last()->isTerminal());
1308
1309     // Need to create a new basic block for the continuation at the caller.
1310     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1311
1312     // Link the early returns to the basic block we're about to create.
1313     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1314         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1315             continue;
1316         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1317         ASSERT(!blockToLink->isLinked);
1318         Node* node = blockToLink->last();
1319         ASSERT(node->op() == Jump);
1320         ASSERT(!node->targetBlock());
1321         node->targetBlock() = block.get();
1322         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1323         if (verbose)
1324             dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1325         blockToLink->didLink();
1326     }
1327     
1328     m_currentBlock = block.get();
1329     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1330     if (verbose)
1331         dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1332     if (callerLinkability == CallerDoesNormalLinking) {
1333         m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1334         m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1335     }
1336     m_graph.appendBlock(block);
1337     prepareToParseBlock();
1338 }
1339
1340 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1341 {
1342     // It's possible that the callsite block head is not owned by the caller.
1343     if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1344         // It's definitely owned by the caller, because the caller created new blocks.
1345         // Assert that this all adds up.
1346         ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1347         ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1348         inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1349     } else {
1350         // It's definitely not owned by the caller. Tell the caller that he does not
1351         // need to link his callsite block head, because we did it for him.
1352         ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1353         ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1354         inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1355     }
1356 }
1357
1358 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance)
1359 {
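    // Try the cheap strategies first: handling a constant internal function or an intrinsic only
    // charges the budget a single unit, whereas genuinely inlining bytecode below is charged its
    // full inlining cost against inliningBalance.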
1360     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1361     
1362     if (!inliningBalance)
1363         return false;
1364     
1365     if (InternalFunction* function = callee.internalFunction()) {
1366         if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind)) {
1367             addToGraph(Phantom, callTargetNode);
1368             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1369             inliningBalance--;
1370             return true;
1371         }
1372         return false;
1373     }
1374     
1375     Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1376     if (intrinsic != NoIntrinsic) {
1377         if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
1378             addToGraph(Phantom, callTargetNode);
1379             emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1380             inliningBalance--;
1381             return true;
1382         }
1383         return false;
1384     }
1385     
1386     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1387     if (myInliningCost > inliningBalance)
1388         return false;
1389     
1390     inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability);
1391     inliningBalance -= myInliningCost;
1392     return true;
1393 }
1394
1395 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1396 {
1397     if (verbose) {
1398         dataLog("Handling inlining...\n");
1399         dataLog("Stack: ", currentCodeOrigin(), "\n");
1400     }
1401     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1402     
1403     if (!callLinkStatus.size()) {
1404         if (verbose)
1405             dataLog("Bailing inlining.\n");
1406         return false;
1407     }
1408     
1409     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1410     if (specializationKind == CodeForConstruct)
1411         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1412     if (callLinkStatus.isClosureCall())
1413         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
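    // The balance is a shared code-size budget for this call site: each candidate we manage to
    // inline (or handle as an intrinsic) below draws it down.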
1414     
1415     // First check if we can avoid creating control flow. Our inliner does some CFG
1416     // simplification on the fly and this helps reduce compile times, but we can only leverage
1417     // this in cases where we don't need control flow diamonds to check the callee.
1418     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1419         emitFunctionChecks(
1420             callLinkStatus[0], callTargetNode, registerOffset, specializationKind);
1421         bool result = attemptToInlineCall(
1422             callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1423             argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1424             inliningBalance);
1425         if (!result && !callLinkStatus.isProved())
1426             undoFunctionChecks(callLinkStatus[0]);
1427         if (verbose) {
1428             dataLog("Done inlining (simple).\n");
1429             dataLog("Stack: ", currentCodeOrigin(), "\n");
1430         }
1431         return result;
1432     }
1433     
1434     // We need to create some kind of switch over callee. For now we only do this if we believe that
1435     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1436     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1437     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1438     // we could improve that aspect by doing polymorphic inlining while still keeping the profiling.
1439     // Currently we opt against this, but it could be interesting; it would require having a
1440     // separate node for call edge profiling.
1441     // FIXME: Introduce the notion of a separate call edge profiling node.
1442     // https://bugs.webkit.org/show_bug.cgi?id=136033
1443     if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()) {
1444         if (verbose) {
1445             dataLog("Bailing inlining (hard).\n");
1446             dataLog("Stack: ", currentCodeOrigin(), "\n");
1447         }
1448         return false;
1449     }
1450     
1451     unsigned oldOffset = m_currentIndex;
1452     
1453     bool allAreClosureCalls = true;
1454     bool allAreDirectCalls = true;
1455     for (unsigned i = callLinkStatus.size(); i--;) {
1456         if (callLinkStatus[i].isClosureCall())
1457             allAreDirectCalls = false;
1458         else
1459             allAreClosureCalls = false;
1460     }
1461     
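    // If every variant is a non-closure call we can switch on the callee cell itself. If every
    // variant is a closure call, distinct closures of the same function have different cells but
    // share an executable, so we switch on that via GetExecutable instead.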
1462     Node* thingToSwitchOn;
1463     if (allAreDirectCalls)
1464         thingToSwitchOn = callTargetNode;
1465     else if (allAreClosureCalls)
1466         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1467     else {
1468         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1469         // where it would be beneficial. It might be best to handle these cases as if all calls were
1470         // closure calls.
1471         // https://bugs.webkit.org/show_bug.cgi?id=136020
1472         if (verbose) {
1473             dataLog("Bailing inlining (mix).\n");
1474             dataLog("Stack: ", currentCodeOrigin(), "\n");
1475         }
1476         return false;
1477     }
1478     
1479     if (verbose) {
1480         dataLog("Doing hard inlining...\n");
1481         dataLog("Stack: ", currentCodeOrigin(), "\n");
1482     }
1483     
1484     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1485     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1486     // get away with doing an immediate-set here because we haven't performed any side effects
1487     // yet.
1488     if (verbose)
1489         dataLog("Register offset: ", registerOffset);
1490     VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1491     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1492     if (verbose)
1493         dataLog("Callee is going to be ", calleeReg, "\n");
1494     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1495     
1496     SwitchData& data = *m_graph.m_switchData.add();
1497     data.kind = SwitchCell;
1498     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
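    // Each successfully inlined variant gets its own case block appended to this switch below.
    // The fall-through target becomes the slow path, which either performs a generic call or,
    // if the profiling proves the case list exhaustive, plants CheckBadCell since that path
    // should be unreachable.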
1499     
1500     BasicBlock* originBlock = m_currentBlock;
1501     if (verbose)
1502         dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1503     originBlock->didLink();
1504     cancelLinkingForBlock(m_inlineStackTop, originBlock);
1505     
1506     // Each inlined callee will have a landing block that it returns at. They should all have jumps
1507     // to the continuation block, which we create last.
1508     Vector<BasicBlock*> landingBlocks;
1509     
1510     // We may force this true if we give up on inlining any of the edges.
1511     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1512     
1513     if (verbose)
1514         dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1515     
1516     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1517         m_currentIndex = oldOffset;
1518         RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1519         m_currentBlock = block.get();
1520         m_graph.appendBlock(block);
1521         prepareToParseBlock();
1522         
1523         Node* myCallTargetNode = getDirect(calleeReg);
1524         
1525         bool inliningResult = attemptToInlineCall(
1526             myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
1527             argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1528             inliningBalance);
1529         
1530         if (!inliningResult) {
1531             // That failed so we let the block die. Nothing interesting should have been added to
1532             // the block. We also give up on inlining any of the (less frequent) callees.
1533             ASSERT(m_currentBlock == block.get());
1534             ASSERT(m_graph.m_blocks.last() == block);
1535             m_graph.killBlockAndItsContents(block.get());
1536             m_graph.m_blocks.removeLast();
1537             
1538             // The fact that inlining failed means we need a slow path.
1539             couldTakeSlowPath = true;
1540             break;
1541         }
1542         
1543         JSCell* thingToCaseOn;
1544         if (allAreDirectCalls)
1545             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
1546         else {
1547             ASSERT(allAreClosureCalls);
1548             thingToCaseOn = callLinkStatus[i].executable();
1549         }
1550         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1551         m_currentIndex = nextOffset;
1552         processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1553         addToGraph(Jump);
1554         if (verbose)
1555             dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1556         m_currentBlock->didLink();
1557         landingBlocks.append(m_currentBlock);
1558
1559         if (verbose)
1560             dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
1561     }
1562     
1563     RefPtr<BasicBlock> slowPathBlock = adoptRef(
1564         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1565     m_currentIndex = oldOffset;
1566     data.fallThrough = BranchTarget(slowPathBlock.get());
1567     m_graph.appendBlock(slowPathBlock);
1568     if (verbose)
1569         dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1570     slowPathBlock->didLink();
1571     prepareToParseBlock();
1572     m_currentBlock = slowPathBlock.get();
1573     Node* myCallTargetNode = getDirect(calleeReg);
1574     if (couldTakeSlowPath) {
1575         addCall(
1576             resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1577             registerOffset, prediction);
1578     } else {
1579         addToGraph(CheckBadCell);
1580         addToGraph(Phantom, myCallTargetNode);
1581         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
1582         
1583         set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1584     }
1585
1586     m_currentIndex = nextOffset;
1587     processSetLocalQueue();
1588     addToGraph(Jump);
1589     landingBlocks.append(m_currentBlock);
1590     
1591     RefPtr<BasicBlock> continuationBlock = adoptRef(
1592         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1593     m_graph.appendBlock(continuationBlock);
1594     if (verbose)
1595         dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1596     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1597     prepareToParseBlock();
1598     m_currentBlock = continuationBlock.get();
1599     
1600     for (unsigned i = landingBlocks.size(); i--;)
1601         landingBlocks[i]->last()->targetBlock() = continuationBlock.get();
1602     
1603     m_currentIndex = oldOffset;
1604     
1605     if (verbose) {
1606         dataLog("Done inlining (hard).\n");
1607         dataLog("Stack: ", currentCodeOrigin(), "\n");
1608     }
1609     return true;
1610 }
1611
1612 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
1613 {
1614     if (argumentCountIncludingThis == 1) { // Math.min() / Math.max()
1615         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1616         return true;
1617     }
1618      
1619     if (argumentCountIncludingThis == 2) { // Math.min(x) / Math.max(x)
1620         Node* result = get(virtualRegisterForArgument(1, registerOffset));
1621         addToGraph(Phantom, Edge(result, NumberUse));
1622         set(VirtualRegister(resultOperand), result);
1623         return true;
1624     }
1625     
1626     if (argumentCountIncludingThis == 3) { // Math.min(x, y) / Math.max(x, y)
1627         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1628         return true;
1629     }
1630     
1631     // Don't handle >=3 arguments for now.
1632     return false;
1633 }
1634
1635 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
1636 {
1637     switch (intrinsic) {
1638     case AbsIntrinsic: {
1639         if (argumentCountIncludingThis == 1) { // Math.abs()
1640             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1641             return true;
1642         }
1643
1644         if (!MacroAssembler::supportsFloatingPointAbs())
1645             return false;
1646
1647         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1648         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1649             node->mergeFlags(NodeMayOverflowInDFG);
1650         set(VirtualRegister(resultOperand), node);
1651         return true;
1652     }
1653
1654     case MinIntrinsic:
1655         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
1656         
1657     case MaxIntrinsic:
1658         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
1659         
1660     case SqrtIntrinsic:
1661     case CosIntrinsic:
1662     case SinIntrinsic: {
1663         if (argumentCountIncludingThis == 1) {
1664             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1665             return true;
1666         }
1667         
1668         switch (intrinsic) {
1669         case SqrtIntrinsic:
1670             if (!MacroAssembler::supportsFloatingPointSqrt())
1671                 return false;
1672             
1673             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1674             return true;
1675             
1676         case CosIntrinsic:
1677             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1678             return true;
1679             
1680         case SinIntrinsic:
1681             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1682             return true;
1683             
1684         default:
1685             RELEASE_ASSERT_NOT_REACHED();
1686             return false;
1687         }
1688     }
1689         
1690     case ArrayPushIntrinsic: {
1691         if (argumentCountIncludingThis != 2)
1692             return false;
1693         
1694         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1695         if (!arrayMode.isJSArray())
1696             return false;
1697         switch (arrayMode.type()) {
1698         case Array::Undecided:
1699         case Array::Int32:
1700         case Array::Double:
1701         case Array::Contiguous:
1702         case Array::ArrayStorage: {
1703             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1704             set(VirtualRegister(resultOperand), arrayPush);
1705             
1706             return true;
1707         }
1708             
1709         default:
1710             return false;
1711         }
1712     }
1713         
1714     case ArrayPopIntrinsic: {
1715         if (argumentCountIncludingThis != 1)
1716             return false;
1717         
1718         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1719         if (!arrayMode.isJSArray())
1720             return false;
1721         switch (arrayMode.type()) {
1722         case Array::Int32:
1723         case Array::Double:
1724         case Array::Contiguous:
1725         case Array::ArrayStorage: {
1726             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1727             set(VirtualRegister(resultOperand), arrayPop);
1728             return true;
1729         }
1730             
1731         default:
1732             return false;
1733         }
1734     }
1735
1736     case CharCodeAtIntrinsic: {
1737         if (argumentCountIncludingThis != 2)
1738             return false;
1739
1740         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1741         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1742         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1743
1744         set(VirtualRegister(resultOperand), charCode);
1745         return true;
1746     }
1747
1748     case CharAtIntrinsic: {
1749         if (argumentCountIncludingThis != 2)
1750             return false;
1751
1752         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1753         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1754         Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1755
1756         set(VirtualRegister(resultOperand), charCode);
1757         return true;
1758     }
1759     case FromCharCodeIntrinsic: {
1760         if (argumentCountIncludingThis != 2)
1761             return false;
1762
1763         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1764         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
1765
1766         set(VirtualRegister(resultOperand), charCode);
1767
1768         return true;
1769     }
1770
1771     case RegExpExecIntrinsic: {
1772         if (argumentCountIncludingThis != 2)
1773             return false;
1774         
1775         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1776         set(VirtualRegister(resultOperand), regExpExec);
1777         
1778         return true;
1779     }
1780         
1781     case RegExpTestIntrinsic: {
1782         if (argumentCountIncludingThis != 2)
1783             return false;
1784         
1785         Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1786         set(VirtualRegister(resultOperand), regExpExec);
1787         
1788         return true;
1789     }
1790
1791     case IMulIntrinsic: {
1792         if (argumentCountIncludingThis != 3)
1793             return false;
1794         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
1795         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
1796         Node* left = get(leftOperand);
1797         Node* right = get(rightOperand);
1798         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
1799         return true;
1800     }
1801         
1802     case FRoundIntrinsic: {
1803         if (argumentCountIncludingThis != 2)
1804             return false;
1805         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1806         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
1807         return true;
1808     }
1809         
1810     case DFGTrueIntrinsic: {
1811         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
1812         return true;
1813     }
1814         
1815     case OSRExitIntrinsic: {
1816         addToGraph(ForceOSRExit);
1817         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1818         return true;
1819     }
1820         
1821     case IsFinalTierIntrinsic: {
1822         set(VirtualRegister(resultOperand),
1823             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
1824         return true;
1825     }
1826         
1827     case SetInt32HeapPredictionIntrinsic: {
1828         for (int i = 1; i < argumentCountIncludingThis; ++i) {
1829             Node* node = get(virtualRegisterForArgument(i, registerOffset));
1830             if (node->hasHeapPrediction())
1831                 node->setHeapPrediction(SpecInt32);
1832         }
1833         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
1834         return true;
1835     }
1836         
1837     case FiatInt52Intrinsic: {
1838         if (argumentCountIncludingThis != 2)
1839             return false;
1840         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
1841         if (enableInt52())
1842             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
1843         else
1844             set(VirtualRegister(resultOperand), get(operand));
1845         return true;
1846     }
1847         
1848     default:
1849         return false;
1850     }
1851 }
1852
1853 bool ByteCodeParser::handleTypedArrayConstructor(
1854     int resultOperand, InternalFunction* function, int registerOffset,
1855     int argumentCountIncludingThis, TypedArrayType type)
1856 {
1857     if (!isTypedView(type))
1858         return false;
1859     
1860     if (function->classInfo() != constructorClassInfoForType(type))
1861         return false;
1862     
1863     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1864         return false;
1865     
1866     // We only have an intrinsic for the case where you say:
1867     //
1868     // new FooArray(blah);
1869     //
1870     // Of course, 'blah' could be any of the following:
1871     //
1872     // - Integer, indicating that you want to allocate an array of that length.
1873     //   This is the thing we're hoping for, and what we can actually do meaningful
1874     //   optimizations for.
1875     //
1876     // - Array buffer, indicating that you want to create a view onto that _entire_
1877     //   buffer.
1878     //
1879     // - Non-buffer object, indicating that you want to create a copy of that
1880     //   object by pretending that it quacks like an array.
1881     //
1882     // - Anything else, indicating that you want to have an exception thrown at
1883     //   you.
1884     //
1885     // The intrinsic, NewTypedArray, will behave as if it could do any of these
1886     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
1887     // predicted Int32, then we lock it in as a normal typed array allocation.
1888     // Otherwise, NewTypedArray turns into a totally opaque function call that
1889     // may clobber the world - by virtue of it accessing properties on what could
1890     // be an object.
1891     //
1892     // Note that although the generic form of NewTypedArray sounds sort of awful,
1893     // it is actually quite likely to be more efficient than a fully generic
1894     // Construct. So, we might want to think about making NewTypedArray variadic,
1895     // or else making Construct not super slow.
1896     
1897     if (argumentCountIncludingThis != 2)
1898         return false;
1899     
1900     set(VirtualRegister(resultOperand),
1901         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
1902     return true;
1903 }
1904
1905 bool ByteCodeParser::handleConstantInternalFunction(
1906     int resultOperand, InternalFunction* function, int registerOffset,
1907     int argumentCountIncludingThis, CodeSpecializationKind kind)
1908 {
1909     // If we ever find that we have a lot of internal functions that we specialize for,
1910     // then we should probably have some sort of hashtable dispatch, or maybe even
1911     // dispatch straight through the MethodTable of the InternalFunction. But for now,
1912     // it seems that this case is hit infrequently enough, and the number of functions
1913     // we know about is small enough, that having just a linear cascade of if statements
1914     // is good enough.
1915     
1916     if (function->classInfo() == ArrayConstructor::info()) {
1917         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
1918             return false;
1919         
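        // new Array(length) with exactly one argument allocates an array of that length; any
        // other arity builds an array directly from the arguments.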
1920         if (argumentCountIncludingThis == 2) {
1921             set(VirtualRegister(resultOperand),
1922                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
1923             return true;
1924         }
1925         
1926         for (int i = 1; i < argumentCountIncludingThis; ++i)
1927             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
1928         set(VirtualRegister(resultOperand),
1929             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
1930         return true;
1931     }
1932     
1933     if (function->classInfo() == StringConstructor::info()) {
1934         Node* result;
1935         
1936         if (argumentCountIncludingThis <= 1)
1937             result = jsConstant(m_vm->smallStrings.emptyString());
1938         else
1939             result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
1940         
1941         if (kind == CodeForConstruct)
1942             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
1943         
1944         set(VirtualRegister(resultOperand), result);
1945         return true;
1946     }
1947     
1948     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
1949         bool result = handleTypedArrayConstructor(
1950             resultOperand, function, registerOffset, argumentCountIncludingThis,
1951             indexToTypedArrayType(typeIndex));
1952         if (result)
1953             return true;
1954     }
1955     
1956     return false;
1957 }
1958
1959 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
1960 {
1961     if (base->hasConstant()) {
1962         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
1963             addToGraph(Phantom, base);
1964             return weakJSConstant(constant);
1965         }
1966     }
1967     
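    // Inline-offset properties live directly in the object cell, so the cell itself serves as the
    // storage; out-of-line properties live in the butterfly, which we load with GetButterfly.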
1968     Node* propertyStorage;
1969     if (isInlineOffset(offset))
1970         propertyStorage = base;
1971     else
1972         propertyStorage = addToGraph(GetButterfly, base);
1973     
1974     StorageAccessData* data = m_graph.m_storageAccessData.add();
1975     data->offset = offset;
1976     data->identifierNumber = identifierNumber;
1977     
1978     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
1979
1980     return getByOffset;
1981 }
1982
1983 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
1984 {
1985     Node* propertyStorage;
1986     if (isInlineOffset(offset))
1987         propertyStorage = base;
1988     else
1989         propertyStorage = addToGraph(GetButterfly, base);
1990     
1991     StorageAccessData* data = m_graph.m_storageAccessData.add();
1992     data->offset = offset;
1993     data->identifierNumber = identifier;
1994     
1995     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
1996     
1997     return result;
1998 }
1999
2000 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2001 {
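    // For each (constant cell, structure) pair, materialize the cell as a constant and plant a
    // structure check on it, pinning down the object chain that the inlined access relies on.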
2002     for (unsigned i = 0; i < vector.size(); ++i)
2003         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2004 }
2005
2006 void ByteCodeParser::handleGetById(
2007     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2008     const GetByIdStatus& getByIdStatus)
2009 {
2010     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2011     
2012     if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2013         set(VirtualRegister(destinationOperand),
2014             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2015         return;
2016     }
2017     
2018     if (getByIdStatus.numVariants() > 1) {
2019         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2020             || !Options::enablePolymorphicAccessInlining()) {
2021             set(VirtualRegister(destinationOperand),
2022                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2023             return;
2024         }
2025         
2026         if (m_graph.compilation())
2027             m_graph.compilation()->noticeInlinedGetById();
2028     
2029         // 1) Emit prototype structure checks for all chains. This may not be optimal
2030         //    if there is some rarely executed case in the chain that requires a lot
2031         //    of checks and those checks are not watchpointable.
2032         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2033             emitChecks(getByIdStatus[variantIndex].constantChecks());
2034         
2035         // 2) Emit a MultiGetByOffset
2036         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2037         data->variants = getByIdStatus.variants();
2038         data->identifierNumber = identifierNumber;
2039         set(VirtualRegister(destinationOperand),
2040             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2041         return;
2042     }
2043     
2044     ASSERT(getByIdStatus.numVariants() == 1);
2045     GetByIdVariant variant = getByIdStatus[0];
2046                 
2047     if (m_graph.compilation())
2048         m_graph.compilation()->noticeInlinedGetById();
2049     
2050     Node* originalBase = base;
2051                 
2052     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2053     
2054     emitChecks(variant.constantChecks());
2055
2056     if (variant.alternateBase())
2057         base = weakJSConstant(variant.alternateBase());
2058     
2059     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2060     // ensure that the base of the original get_by_id is kept alive until we're done with
2061     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2062     // on something other than the base following the CheckStructure on base.
2063     if (originalBase != base)
2064         addToGraph(Phantom, originalBase);
2065     
2066     Node* loadedValue = handleGetByOffset(
2067         variant.callLinkStatus() ? SpecCellOther : prediction,
2068         base, variant.baseStructure(), identifierNumber, variant.offset(),
2069         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2070     
2071     if (!variant.callLinkStatus()) {
2072         set(VirtualRegister(destinationOperand), loadedValue);
2073         return;
2074     }
2075     
2076     Node* getter = addToGraph(GetGetter, loadedValue);
2077     
2078     // Make a call. We don't try to get fancy with using the smallest operand number because
2079     // the stack layout phase should compress the stack anyway.
2080     
2081     unsigned numberOfParameters = 0;
2082     numberOfParameters++; // The 'this' argument.
2083     numberOfParameters++; // True return PC.
2084     
2085     // Start with a register offset that corresponds to the last in-use register.
2086     int registerOffset = virtualRegisterForLocal(
2087         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2088     registerOffset -= numberOfParameters;
2089     registerOffset -= JSStack::CallFrameHeaderSize;
2090     
2091     // Get the alignment right.
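    // registerOffset is negative (the callee's frame sits at lower addresses than ours), so we
    // negate it, round the magnitude up to the stack alignment, and negate it back.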
2092     registerOffset = -WTF::roundUpToMultipleOf(
2093         stackAlignmentRegisters(),
2094         -registerOffset);
2095     
2096     ensureLocals(
2097         m_inlineStackTop->remapOperand(
2098             VirtualRegister(registerOffset)).toLocal());
2099     
2100     // Issue SetLocals. This has two effects:
2101     // 1) That's how handleCall() sees the arguments.
2102     // 2) If we inline then this ensures that the arguments are flushed so that if you use
2103     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
2104     //    since we only really care about 'this' in this case. But we're not going to take that
2105     //    shortcut.
2106     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2107     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2108     
2109     handleCall(
2110         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2111         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
2112 }
2113
2114 void ByteCodeParser::emitPutById(
2115     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2116 {
2117     if (isDirect)
2118         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2119     else
2120         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2121 }
2122
2123 void ByteCodeParser::handlePutById(
2124     Node* base, unsigned identifierNumber, Node* value,
2125     const PutByIdStatus& putByIdStatus, bool isDirect)
2126 {
2127     if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2128         if (!putByIdStatus.isSet())
2129             addToGraph(ForceOSRExit);
2130         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2131         return;
2132     }
2133     
2134     if (putByIdStatus.numVariants() > 1) {
2135         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2136             || !Options::enablePolymorphicAccessInlining()) {
2137             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2138             return;
2139         }
2140         
2141         if (m_graph.compilation())
2142             m_graph.compilation()->noticeInlinedPutById();
2143         
2144         if (!isDirect) {
2145             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2146                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2147                     continue;
2148                 emitChecks(putByIdStatus[variantIndex].constantChecks());
2149             }
2150         }
2151         
2152         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2153         data->variants = putByIdStatus.variants();
2154         data->identifierNumber = identifierNumber;
2155         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2156         return;
2157     }
2158     
2159     ASSERT(putByIdStatus.numVariants() == 1);
2160     const PutByIdVariant& variant = putByIdStatus[0];
2161     
2162     switch (variant.kind()) {
2163     case PutByIdVariant::Replace: {
2164         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2165         handlePutByOffset(base, identifierNumber, variant.offset(), value);
2166         if (m_graph.compilation())
2167             m_graph.compilation()->noticeInlinedPutById();
2168         return;
2169     }
2170     
2171     case PutByIdVariant::Transition: {
2172         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2173         emitChecks(variant.constantChecks());
2174
2175         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2176     
2177         Node* propertyStorage;
2178         Transition* transition = m_graph.m_transitions.add(
2179             variant.oldStructureForTransition(), variant.newStructure());
2180
2181         if (variant.reallocatesStorage()) {
2182
2183             // If we're growing the property storage then it must be because we're
2184             // storing into the out-of-line storage.
2185             ASSERT(!isInlineOffset(variant.offset()));
2186
2187             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2188                 propertyStorage = addToGraph(
2189                     AllocatePropertyStorage, OpInfo(transition), base);
2190             } else {
2191                 propertyStorage = addToGraph(
2192                     ReallocatePropertyStorage, OpInfo(transition),
2193                     base, addToGraph(GetButterfly, base));
2194             }
2195         } else {
2196             if (isInlineOffset(variant.offset()))
2197                 propertyStorage = base;
2198             else
2199                 propertyStorage = addToGraph(GetButterfly, base);
2200         }
2201
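        // Now that property storage of the required size is guaranteed, switch the object to the
        // new structure and then store the value at its offset.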
2202         addToGraph(PutStructure, OpInfo(transition), base);
2203
2204         StorageAccessData* data = m_graph.m_storageAccessData.add();
2205         data->offset = variant.offset();
2206         data->identifierNumber = identifierNumber;
2207         
2208         addToGraph(
2209             PutByOffset,
2210             OpInfo(data),
2211             propertyStorage,
2212             base,
2213             value);
2214
2215         if (m_graph.compilation())
2216             m_graph.compilation()->noticeInlinedPutById();
2217         return;
2218     }
2219         
2220     case PutByIdVariant::Setter: {
2221         Node* originalBase = base;
2222         
2223         addToGraph(
2224             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2225         
2226         emitChecks(variant.constantChecks());
2227         
2228         if (variant.alternateBase())
2229             base = weakJSConstant(variant.alternateBase());
2230         
2231         Node* loadedValue = handleGetByOffset(
2232             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2233             GetGetterSetterByOffset);
2234         
2235         Node* setter = addToGraph(GetSetter, loadedValue);
2236         
2237         // Make a call. We don't try to get fancy with using the smallest operand number because
2238         // the stack layout phase should compress the stack anyway.
2239     
2240         unsigned numberOfParameters = 0;
2241         numberOfParameters++; // The 'this' argument.
2242         numberOfParameters++; // The new value.
2243         numberOfParameters++; // True return PC.
2244     
2245         // Start with a register offset that corresponds to the last in-use register.
2246         int registerOffset = virtualRegisterForLocal(
2247             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2248         registerOffset -= numberOfParameters;
2249         registerOffset -= JSStack::CallFrameHeaderSize;
2250     
2251         // Get the alignment right.
2252         registerOffset = -WTF::roundUpToMultipleOf(
2253             stackAlignmentRegisters(),
2254             -registerOffset);
2255     
2256         ensureLocals(
2257             m_inlineStackTop->remapOperand(
2258                 VirtualRegister(registerOffset)).toLocal());
2259     
2260         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2261         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2262         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2263     
2264         handleCall(
2265             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2266             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2267             *variant.callLinkStatus(), SpecOther);
2268         return;
2269     }
2270     
2271     default: {
2272         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2273         return;
2274     } }
2275 }
2276
2277 void ByteCodeParser::prepareToParseBlock()
2278 {
2279     clearCaches();
2280     ASSERT(m_setLocalQueue.isEmpty());
2281 }
2282
2283 void ByteCodeParser::clearCaches()
2284 {
2285     m_constants.resize(0);
2286 }
2287
2288 Node* ByteCodeParser::getScope(VirtualRegister scopeChain, unsigned skipCount)
2289 {
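    // Walk skipCount links up the scope chain, starting from the scope held in the given register.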
2290     Node* localBase = get(scopeChain);
2291     for (unsigned n = skipCount; n--;)
2292         localBase = addToGraph(SkipScope, localBase);
2293     return localBase;
2294 }
2295
2296 bool ByteCodeParser::parseBlock(unsigned limit)
2297 {
2298     bool shouldContinueParsing = true;
2299
2300     Interpreter* interpreter = m_vm->interpreter;
2301     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2302     unsigned blockBegin = m_currentIndex;
2303     
2304     // If we are the first basic block, introduce markers for arguments. This allows
2305     // us to track if a use of an argument may use the actual argument passed, as
2306     // opposed to using a value we set explicitly.
2307     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2308         m_graph.m_arguments.resize(m_numArguments);
2309         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
2310             VariableAccessData* variable = newVariableAccessData(
2311                 virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
2312             variable->mergeStructureCheckHoistingFailed(
2313                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2314             variable->mergeCheckArrayHoistingFailed(
2315                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2316             
2317             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2318             m_graph.m_arguments[argument] = setArgument;
2319             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2320         }
2321     }
2322
2323     while (true) {
2324         processSetLocalQueue();
2325         
2326         // Don't extend over jump destinations.
2327         if (m_currentIndex == limit) {
2328             // Ordinarily we want to plant a jump. But refuse to do this if the block is
2329             // empty. This is a special case for inlining, which might otherwise create
2330             // some empty blocks in some cases. When parseBlock() returns with an empty
2331             // block, it will get repurposed instead of creating a new one. Note that this
2332             // logic relies on every bytecode resulting in one or more nodes, which would
2333             // be true anyway except for op_loop_hint, which emits a Phantom to force this
2334             // to be true.
2335             if (!m_currentBlock->isEmpty())
2336                 addToGraph(Jump, OpInfo(m_currentIndex));
2337             return shouldContinueParsing;
2338         }
2339         
2340         // Switch on the current bytecode opcode.
2341         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2342         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2343         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2344         
2345         if (Options::verboseDFGByteCodeParsing())
2346             dataLog("    parsing ", currentCodeOrigin(), "\n");
2347         
2348         if (m_graph.compilation()) {
2349             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2350                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2351         }
2352         
2353         switch (opcodeID) {
2354
2355         // === Function entry opcodes ===
2356
2357         case op_enter: {
2358             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2359             // Initialize all locals to undefined.
2360             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2361                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2362             if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
2363                 set(virtualRegisterForArgument(0), undefined, ImmediateNakedSet);
2364             NEXT_OPCODE(op_enter);
2365         }
2366             
2367         case op_touch_entry:
2368             if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
2369                 addToGraph(ForceOSRExit);
2370             NEXT_OPCODE(op_touch_entry);
2371             
2372         case op_to_this: {
2373             Node* op1 = getThis();
2374             if (op1->op() != ToThis) {
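                // We can only burn in a structure check for |this| when the profiled to_this status
                // is clean, the cached structure uses JSObject's default toThis, and we never saw a
                // slow case or a relevant exit; otherwise we fall back to a generic ToThis node.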
2375                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2376                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2377                     || !cachedStructure
2378                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2379                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2380                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2381                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2382                     setThis(addToGraph(ToThis, op1));
2383                 } else {
2384                     addToGraph(
2385                         CheckStructure,
2386                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2387                         op1);
2388                 }
2389             }
2390             NEXT_OPCODE(op_to_this);
2391         }
2392
2393         case op_create_this: {
2394             int calleeOperand = currentInstruction[2].u.operand;
2395             Node* callee = get(VirtualRegister(calleeOperand));
2396             bool alreadyEmitted = false;
2397             if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>()) {
2398                 if (Structure* structure = function->allocationStructure()) {
2399                     addToGraph(AllocationProfileWatchpoint, OpInfo(m_graph.freeze(function)));
2400                     // The callee is still live up to this point.
2401                     addToGraph(Phantom, callee);
2402                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2403                     alreadyEmitted = true;
2404                 }
2405             }
2406             if (!alreadyEmitted) {
2407                 set(VirtualRegister(currentInstruction[1].u.operand),
2408                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2409             }
2410             NEXT_OPCODE(op_create_this);
2411         }
2412
2413         case op_new_object: {
2414             set(VirtualRegister(currentInstruction[1].u.operand),
2415                 addToGraph(NewObject,
2416                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2417             NEXT_OPCODE(op_new_object);
2418         }
2419             
2420         case op_new_array: {
2421             int startOperand = currentInstruction[2].u.operand;
2422             int numOperands = currentInstruction[3].u.operand;
2423             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2424             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2425                 addVarArgChild(get(VirtualRegister(operandIdx)));
2426             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2427             NEXT_OPCODE(op_new_array);
2428         }
2429             
2430         case op_new_array_with_size: {
2431             int lengthOperand = currentInstruction[2].u.operand;
2432             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2433             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2434             NEXT_OPCODE(op_new_array_with_size);
2435         }
2436             
2437         case op_new_array_buffer: {
2438             int startConstant = currentInstruction[2].u.operand;
2439             int numConstants = currentInstruction[3].u.operand;
2440             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2441             NewArrayBufferData data;
2442             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2443             data.numConstants = numConstants;
2444             data.indexingType = profile->selectIndexingType();
2445
2446             // If this statement has never executed, we'll have the wrong indexing type in the profile.
2447             for (int i = 0; i < numConstants; ++i) {
2448                 data.indexingType =
2449                     leastUpperBoundOfIndexingTypeAndValue(
2450                         data.indexingType,
2451                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2452             }
2453             
2454             m_graph.m_newArrayBufferData.append(data);
2455             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2456             NEXT_OPCODE(op_new_array_buffer);
2457         }
2458             
2459         case op_new_regexp: {
2460             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2461             NEXT_OPCODE(op_new_regexp);
2462         }
2463             
2464         case op_get_callee: {
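            // If the instruction has a cached callee and profiling never saw a slow case or a
            // BadCell exit, freeze that function, guard the actual callee with CheckCell, and
            // constant-fold the result; otherwise just load the callee from the call frame.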
2465             JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
2466             if (!cachedFunction 
2467                 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2468                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
2469                 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
2470             } else {
2471                 FrozenValue* frozen = m_graph.freeze(cachedFunction);
2472                 ASSERT(cachedFunction->inherits(JSFunction::info()));
2473                 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
2474                 addToGraph(CheckCell, OpInfo(frozen), actualCallee);
2475                 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2476             }
2477             NEXT_OPCODE(op_get_callee);
2478         }
2479
2480         // === Bitwise operations ===
2481
2482         case op_bitand: {
2483             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2484             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2485             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2486             NEXT_OPCODE(op_bitand);
2487         }
2488
2489         case op_bitor: {
2490             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2491             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2492             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2493             NEXT_OPCODE(op_bitor);
2494         }
2495
2496         case op_bitxor: {
2497             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2498             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2499             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2500             NEXT_OPCODE(op_bitxor);
2501         }
2502
2503         case op_rshift: {
2504             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2505             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2506             set(VirtualRegister(currentInstruction[1].u.operand),
2507                 addToGraph(BitRShift, op1, op2));
2508             NEXT_OPCODE(op_rshift);
2509         }
2510
2511         case op_lshift: {
2512             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2513             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2514             set(VirtualRegister(currentInstruction[1].u.operand),
2515                 addToGraph(BitLShift, op1, op2));
2516             NEXT_OPCODE(op_lshift);
2517         }
2518
2519         case op_urshift: {
2520             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2521             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2522             set(VirtualRegister(currentInstruction[1].u.operand),
2523                 addToGraph(BitURShift, op1, op2));
2524             NEXT_OPCODE(op_urshift);
2525         }
2526             
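             // op_unsigned reboxes a value the bytecode knows is a uint32 (e.g. the result of >>>)
             // as a JS number; makeSafe() consults the profiling for this opcode to decide whether
             // the result can stay an int32 or may need the double path.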
2527         case op_unsigned: {
2528             set(VirtualRegister(currentInstruction[1].u.operand),
2529                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2530             NEXT_OPCODE(op_unsigned);
2531         }
2532
2533         // === Increment/Decrement opcodes ===
2534
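             // ++ and -- read and write the same register; they are parsed as ArithAdd/ArithSub
             // against a constant one, with makeSafe() folding in overflow information from
             // profiling.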
2535         case op_inc: {
2536             int srcDst = currentInstruction[1].u.operand;
2537             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2538             Node* op = get(srcDstVirtualRegister);
2539             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2540             NEXT_OPCODE(op_inc);
2541         }
2542
2543         case op_dec: {
2544             int srcDst = currentInstruction[1].u.operand;
2545             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2546             Node* op = get(srcDstVirtualRegister);
2547             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2548             NEXT_OPCODE(op_dec);
2549         }
2550
2551         // === Arithmetic operations ===
2552
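             // If both operands are already known to produce numbers, '+' cannot mean string
             // concatenation, so the cheaper ArithAdd is used; otherwise fall back to the generic
             // ValueAdd.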
2553         case op_add: {
2554             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2555             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2556             if (op1->hasNumberResult() && op2->hasNumberResult())
2557                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2558             else
2559                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2560             NEXT_OPCODE(op_add);
2561         }
2562
2563         case op_sub: {
2564             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2565             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2566             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2567             NEXT_OPCODE(op_sub);
2568         }
2569
2570         case op_negate: {
2571             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2572             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2573             NEXT_OPCODE(op_negate);
2574         }
2575
2576         case op_mul: {
2577             // Multiply requires that the inputs are not truncated, unfortunately.
2578             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2579             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2580             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2581             NEXT_OPCODE(op_mul);
2582         }
2583
2584         case op_mod: {
2585             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2586             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2587             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2588             NEXT_OPCODE(op_mod);
2589         }
2590
2591         case op_div: {
2592             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2593             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2594             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2595             NEXT_OPCODE(op_div);
2596         }
2597
2598         // === Misc operations ===
2599
2600         case op_debug:
2601             addToGraph(Breakpoint);
2602             NEXT_OPCODE(op_debug);
2603
2604         case op_profile_will_call: {
2605             addToGraph(ProfileWillCall);
2606             NEXT_OPCODE(op_profile_will_call);
2607         }
2608
2609         case op_profile_did_call: {
2610             addToGraph(ProfileDidCall);
2611             NEXT_OPCODE(op_profile_did_call);
2612         }
2613
2614         case op_mov: {
2615             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2616             set(VirtualRegister(currentInstruction[1].u.operand), op);
2617             NEXT_OPCODE(op_mov);
2618         }
2619
2620         case op_check_has_instance:
2621             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2622             NEXT_OPCODE(op_check_has_instance);
2623
2624         case op_instanceof: {
2625             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2626             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2627             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2628             NEXT_OPCODE(op_instanceof);
2629         }
2630             
2631         case op_is_undefined: {
2632             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2633             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2634             NEXT_OPCODE(op_is_undefined);
2635         }
2636
2637         case op_is_boolean: {
2638             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2639             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2640             NEXT_OPCODE(op_is_boolean);
2641         }
2642
2643         case op_is_number: {
2644             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2645             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2646             NEXT_OPCODE(op_is_number);
2647         }
2648
2649         case op_is_string: {
2650             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2651             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2652             NEXT_OPCODE(op_is_string);
2653         }
2654
2655         case op_is_object: {
2656             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2657             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2658             NEXT_OPCODE(op_is_object);
2659         }
2660
2661         case op_is_function: {
2662             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2663             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2664             NEXT_OPCODE(op_is_function);
2665         }
2666
2667         case op_not: {
2668             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2669             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2670             NEXT_OPCODE(op_not);
2671         }
2672             
2673         case op_to_primitive: {
2674             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2675             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2676             NEXT_OPCODE(op_to_primitive);
2677         }
2678             
2679         case op_strcat: {
2680             int startOperand = currentInstruction[2].u.operand;
2681             int numOperands = currentInstruction[3].u.operand;
2682 #if CPU(X86)
2683             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2684             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2685             const unsigned maxRopeArguments = 2;
2686 #else
2687             const unsigned maxRopeArguments = 3;
2688 #endif
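                 // The operands live in consecutive registers growing downwards from startOperand.
                 // Each is converted with ToString first, then folded into MakeRope nodes at most
                 // maxRopeArguments at a time, keeping the partial rope in operands[0].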
2689             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2690             for (int i = 0; i < numOperands; i++)
2691                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2692
2693             for (int i = 0; i < numOperands; i++)
2694                 addToGraph(Phantom, toStringNodes[i]);
2695
2696             Node* operands[AdjacencyList::Size];
2697             unsigned indexInOperands = 0;
2698             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
2699                 operands[i] = 0;
2700             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
2701                 if (indexInOperands == maxRopeArguments) {
2702                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
2703                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
2704                         operands[i] = 0;
2705                     indexInOperands = 1;
2706                 }
2707                 
2708                 ASSERT(indexInOperands < AdjacencyList::Size);
2709                 ASSERT(indexInOperands < maxRopeArguments);
2710                 operands[indexInOperands++] = toStringNodes[operandIdx];
2711             }
2712             set(VirtualRegister(currentInstruction[1].u.operand),
2713                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
2714             NEXT_OPCODE(op_strcat);
2715         }
2716
2717         case op_less: {
2718             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2719             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2720             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
2721             NEXT_OPCODE(op_less);
2722         }
2723
2724         case op_lesseq: {
2725             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2726             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2727             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
2728             NEXT_OPCODE(op_lesseq);
2729         }
2730
2731         case op_greater: {
2732             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2733             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2734             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
2735             NEXT_OPCODE(op_greater);
2736         }
2737
2738         case op_greatereq: {
2739             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2740             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2741             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
2742             NEXT_OPCODE(op_greatereq);
2743         }
2744
2745         case op_eq: {
2746             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2747             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2748             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
2749             NEXT_OPCODE(op_eq);
2750         }
2751
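             // 'x == null' in JS also matches undefined; it is compiled as CompareEqConstant
             // against the null constant.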
2752         case op_eq_null: {
2753             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2754             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
2755             NEXT_OPCODE(op_eq_null);
2756         }
2757
2758         case op_stricteq: {
2759             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2760             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2761             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
2762             NEXT_OPCODE(op_stricteq);
2763         }
2764
2765         case op_neq: {
2766             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2767             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2768             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
2769             NEXT_OPCODE(op_neq);
2770         }
2771
2772         case op_neq_null: {
2773             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2774             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
2775             NEXT_OPCODE(op_neq_null);
2776         }
2777
2778         case op_nstricteq: {
2779             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2780             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2781             Node* invertedResult = addToGraph(CompareStrictEq, op1, op2);
2783             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
2784             NEXT_OPCODE(op_nstricteq);
2785         }
2786
2787         // === Property access operations ===
2788
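             // Indexed reads: the ArrayMode records what the array profile has observed (indexing
             // type and whether the slow path was ever taken), and the result prediction comes from
             // the opcode's value profile.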
2789         case op_get_by_val: {
2790             SpeculatedType prediction = getPredictionWithoutOSRExit();
2791             
2792             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2793             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
2794             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
2795             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
2796             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
2797
2798             NEXT_OPCODE(op_get_by_val);
2799         }
2800
2801         case op_put_by_val_direct:
2802         case op_put_by_val: {
2803             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2804
2805             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
2806             
2807             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
2808             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2809             
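                 // PutByVal takes a variable-arity child list: base, property and value, plus two
                 // empty slots so later phases have room to attach property storage and length
                 // edges.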
2810             addVarArgChild(base);
2811             addVarArgChild(property);
2812             addVarArgChild(value);
2813             addVarArgChild(0); // Leave room for property storage.
2814             addVarArgChild(0); // Leave room for length.
2815             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
2816
2817             NEXT_OPCODE(op_put_by_val);
2818         }
2819             
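             // Named reads: GetByIdStatus summarizes what the baseline code block's ICs (and any
             // DFG stubs) have seen, and handleGetById() decides how much of the access can be
             // inlined versus emitting a generic GetById.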
2820         case op_get_by_id:
2821         case op_get_by_id_out_of_line:
2822         case op_get_array_length: {
2823             SpeculatedType prediction = getPrediction();
2824             
2825             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
2826             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2827             
2828             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
2829             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
2830                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2831                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2832                 currentCodeOrigin(), uid);
2833             
2834             handleGetById(
2835                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
2836
2837             NEXT_OPCODE(op_get_by_id);
2838         }
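             // All six put_by_id variants funnel through here; operand 8 carries the 'direct' flag,
             // distinguishing own-property definition (as used for object literals) from an
             // ordinary assignment.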
2839         case op_put_by_id:
2840         case op_put_by_id_out_of_line:
2841         case op_put_by_id_transition_direct:
2842         case op_put_by_id_transition_normal:
2843         case op_put_by_id_transition_direct_out_of_line:
2844         case op_put_by_id_transition_normal_out_of_line: {
2845             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
2846             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
2847             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
2848             bool direct = currentInstruction[8].u.operand;
2849
2850             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
2851                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
2852                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
2853                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
2854             
2855             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
2856             NEXT_OPCODE(op_put_by_id);
2857         }
2858
2859         case op_init_global_const_nop: {
2860             NEXT_OPCODE(op_init_global_const_nop);
2861         }
2862
2863         case op_init_global_const: {
2864             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2865             addToGraph(
2866                 PutGlobalVar,
2867                 OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
2868                 value);
2869             NEXT_OPCODE(op_init_global_const);
2870         }
2871
2872         case op_profile_type: {
2873             Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
2874             addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
2875             NEXT_OPCODE(op_profile_type);
2876         }
2877
2878         case op_profile_control_flow: {
2879             BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
2880             addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
2881             NEXT_OPCODE(op_profile_control_flow);
2882         }
2883
2884         // === Block terminators ===
2885
2886         case op_jmp: {
2887             int relativeOffset = currentInstruction[1].u.operand;
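                 // A non-positive offset is a backward jump (typically a loop back-edge), hence the
                 // flush before emitting the terminal.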
2888             if (relativeOffset <= 0)
2889                 flushForTerminal();
2890             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2891             LAST_OPCODE(op_jmp);
2892         }
2893
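             // Conditional jumps build a Branch whose BranchData lists the taken target first and
             // the not-taken target second; op_jtrue and op_jfalse just swap which of the two is
             // the jump offset and which is the fall-through.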
2894         case op_jtrue: {
2895             unsigned relativeOffset = currentInstruction[2].u.operand;
2896             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2897             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
2898             LAST_OPCODE(op_jtrue);
2899         }
2900
2901         case op_jfalse: {
2902             unsigned relativeOffset = currentInstruction[2].u.operand;
2903             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
2904             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
2905             LAST_OPCODE(op_jfalse);
2906         }
2907
2908         case op_jeq_null: {
2909             unsigned relativeOffset = currentInstruction[2].u.operand;
2910             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2911             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2912             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
2913             LAST_OPCODE(op_jeq_null);
2914         }
2915
2916         case op_jneq_null: {
2917             unsigned relativeOffset = currentInstruction[2].u.operand;
2918             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
2919             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
2920             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
2921             LAST_OPCODE(op_jneq_null);
2922         }
2923
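             // Fused compare-and-branch opcodes emit the comparison node feeding a Branch; the
             // negated forms (op_jnless and friends) keep the same comparison and simply swap the
             // taken and not-taken targets.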
2924         case op_jless: {
2925             unsigned relativeOffset = currentInstruction[3].u.operand;
2926             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2927             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2928             Node* condition = addToGraph(CompareLess, op1, op2);
2929             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
2930             LAST_OPCODE(op_jless);
2931         }
2932
2933         case op_jlesseq: {
2934             unsigned relativeOffset = currentInstruction[3].u.operand;
2935             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2936             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2937             Node* condition = addToGraph(CompareLessEq, op1, op2);
2938             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
2939             LAST_OPCODE(op_jlesseq);
2940         }
2941
2942         case op_jgreater: {
2943             unsigned relativeOffset = currentInstruction[3].u.operand;
2944             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2945             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2946             Node* condition = addToGraph(CompareGreater, op1, op2);
2947             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
2948             LAST_OPCODE(op_jgreater);
2949         }
2950
2951         case op_jgreatereq: {
2952             unsigned relativeOffset = currentInstruction[3].u.operand;
2953             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2954             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2955             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2956             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
2957             LAST_OPCODE(op_jgreatereq);
2958         }
2959
2960         case op_jnless: {
2961             unsigned relativeOffset = currentInstruction[3].u.operand;
2962             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2963             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2964             Node* condition = addToGraph(CompareLess, op1, op2);
2965             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
2966             LAST_OPCODE(op_jnless);
2967         }
2968
2969         case op_jnlesseq: {
2970             unsigned relativeOffset = currentInstruction[3].u.operand;
2971             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2972             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2973             Node* condition = addToGraph(CompareLessEq, op1, op2);
2974             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
2975             LAST_OPCODE(op_jnlesseq);
2976         }
2977
2978         case op_jngreater: {
2979             unsigned relativeOffset = currentInstruction[3].u.operand;
2980             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2981             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2982             Node* condition = addToGraph(CompareGreater, op1, op2);
2983             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
2984             LAST_OPCODE(op_jngreater);
2985         }
2986
2987         case op_jngreatereq: {
2988             unsigned relativeOffset = currentInstruction[3].u.operand;
2989             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
2990             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
2991             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2992             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
2993             LAST_OPCODE(op_jngreatereq);
2994         }
2995             
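             // Switches become a Switch node with explicit SwitchData. Holes in the jump table
             // (zero offsets) and cases that would only reach the fall-through are skipped, so the
             // node carries just the interesting cases.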
2996         case op_switch_imm: {
2997             SwitchData& data = *m_graph.m_switchData.add();
2998             data.kind = SwitchImm;
2999             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3000             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3001             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3002             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3003                 if (!table.branchOffsets[i])
3004                     continue;
3005                 unsigned target = m_currentIndex + table.branchOffsets[i];
3006                 if (target == data.fallThrough.bytecodeIndex())
3007                     continue;
3008                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
3009             }
3010             flushIfTerminal(data);
3011             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3012             LAST_OPCODE(op_switch_imm);
3013         }
3014             
3015         case op_switch_char: {
3016             SwitchData& data = *m_graph.m_switchData.add();
3017             data.kind = SwitchChar;
3018             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3019             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3020             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3021             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3022                 if (!table.branchOffsets[i])
3023                     continue;
3024                 unsigned target = m_currentIndex + table.branchOffsets[i];
3025                 if (target == data.fallThrough.bytecodeIndex())
3026                     continue;
3027                 data.cases.append(
3028                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
3029             }
3030             flushIfTerminal(data);
3031             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3032             LAST_OPCODE(op_switch_char);
3033         }
3034
3035         case op_switch_string: {
3036             SwitchData& data = *m_graph.m_switchData.add();
3037             data.kind = SwitchString;
3038             data.switchTableIndex = currentInstruction[1].u.operand;
3039             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3040             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
3041             StringJumpTable::StringOffsetTable::iterator iter;
3042             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
3043             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
3044                 unsigned target = m_currentIndex + iter->value.branchOffset;
3045                 if (target == data.fallThrough.bytecodeIndex())
3046                     continue;
3047                 data.cases.append(
3048                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
3049             }
3050             flushIfTerminal(data);
3051             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3052             LAST_OPCODE(op_switch_string);
3053         }
3054
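             // Returning from an inlined callee does not emit Return: the result is copied into the
             // caller-provided return register and the block is marked so that inlining can link it
             // back to the continuation. Only the outermost frame emits an actual Return node.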
3055         case op_ret:
3056             flushForReturn();
3057             if (inlineCallFrame()) {
3058                 if (m_inlineStackTop->m_returnValue.isValid())
3059                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
3060                 m_inlineStackTop->m_didReturn = true;
3061                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
3062                     // If we're returning from the first block, then we're done parsing.
3063                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
3064                     shouldContinueParsing = false;
3065                     LAST_OPCODE(op_ret);
3066                 } else {
3067                     // If inlining created blocks, and we're doing a return, then we need some
3068                     // special linking.
3069                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3070                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
3071                 }
3072                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
3073                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
3074                     addToGraph(Jump, OpInfo(0));
3075                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
3076                     m_inlineStackTop->m_didEarlyReturn = true;
3077                 }
3078                 LAST_OPCODE(op_ret);
3079             }
3080             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3081             LAST_OPCODE(op_ret);
3082             
3083         case op_end:
3084             flushForReturn();
3085             ASSERT(!inlineCallFrame());
3086             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3087             LAST_OPCODE(op_end);
3088
3089         case op_throw:
3090             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
3091             flushForTerminal();
3092             addToGraph(Unreachable);
3093             LAST_OPCODE(op_throw);
3094             
3095         case op_throw_static_error:
3096             addToGraph(ThrowReferenceError);
3097             flushForTerminal();
3098             addToGraph(Unreachable);
3099             LAST_OPCODE(op_throw_static_error);
3100             
3101         case op_call:
3102             handleCall(currentInstruction, Call, CodeForCall);
3103             NEXT_OPCODE(op_call);
3104             
3105         case op_construct:
3106             handleCall(currentInstruction, Construct, CodeForConstruct);
3107             NEXT_OPCODE(op_construct);
3108             
3109         case op_call_varargs: {
3110             int result = currentInstruction[1].u.operand;
3111             int callee = currentInstruction[2].u.operand;
3112             int thisReg = currentInstruction[3].u.operand;
3113             int arguments = currentInstruction[4].u.operand;
3114             int firstFreeReg = currentInstruction[5].u.operand;
3115             
3116             ASSERT(inlineCallFrame());
3117             ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
3118             ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
3119
3120             addToGraph(CheckArgumentsNotCreated);
3121
3122             unsigned argCount = inlineCallFrame()->arguments.size();
3123             
3124             // Let's compute the register offset. We start with the last used register, and
3125             // then adjust for the things we want in the call frame.
3126             int registerOffset = firstFreeReg + 1;
3127             registerOffset -= argCount; // We will be passing some arguments.
3128             registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
3129             
3130             // Get the alignment right.
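                 // registerOffset is negative, so round its magnitude up to the register alignment
                 // and then restore the sign.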
3131             registerOffset = -WTF::roundUpToMultipleOf(
3132                 stackAlignmentRegisters(),
3133                 -registerOffset);
3134
3135             ensureLocals(
3136                 m_inlineStackTop->remapOperand(
3137                     VirtualRegister(registerOffset)).toLocal());
3138             
3139             // The bytecode wouldn't have set up the arguments. We set them up here instead,
3140             // and make it look as if the bytecode had done it.
3141             int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
3142             set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
3143             for (unsigned argument = 1; argument < argCount; ++argument)
3144                 set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
3145             
3146             handleCall(
3147                 result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
3148                 callee, argCount, registerOffset);
3149             NEXT_OPCODE(op_call_varargs);