Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
1 /*
2  * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGByteCodeParser.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "ArrayConstructor.h"
32 #include "BasicBlockLocation.h"
33 #include "CallLinkStatus.h"
34 #include "CodeBlock.h"
35 #include "CodeBlockWithJITType.h"
36 #include "DFGArrayMode.h"
37 #include "DFGCapabilities.h"
38 #include "DFGGraph.h"
39 #include "DFGJITCode.h"
40 #include "GetByIdStatus.h"
41 #include "Heap.h"
42 #include "JSLexicalEnvironment.h"
43 #include "JSCInlines.h"
44 #include "PreciseJumpTargets.h"
45 #include "PutByIdStatus.h"
46 #include "StackAlignment.h"
47 #include "StringConstructor.h"
48 #include <wtf/CommaPrinter.h>
49 #include <wtf/HashMap.h>
50 #include <wtf/MathExtras.h>
51 #include <wtf/StdLibExtras.h>
52
53 namespace JSC { namespace DFG {
54
55 static const bool verbose = false;
56
57 class ConstantBufferKey {
58 public:
59     ConstantBufferKey()
60         : m_codeBlock(0)
61         , m_index(0)
62     {
63     }
64     
65     ConstantBufferKey(WTF::HashTableDeletedValueType)
66         : m_codeBlock(0)
67         , m_index(1)
68     {
69     }
70     
71     ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
72         : m_codeBlock(codeBlock)
73         , m_index(index)
74     {
75     }
76     
77     bool operator==(const ConstantBufferKey& other) const
78     {
79         return m_codeBlock == other.m_codeBlock
80             && m_index == other.m_index;
81     }
82     
83     unsigned hash() const
84     {
85         return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
86     }
87     
88     bool isHashTableDeletedValue() const
89     {
90         return !m_codeBlock && m_index;
91     }
92     
93     CodeBlock* codeBlock() const { return m_codeBlock; }
94     unsigned index() const { return m_index; }
95     
96 private:
97     CodeBlock* m_codeBlock;
98     unsigned m_index;
99 };
100
101 struct ConstantBufferKeyHash {
102     static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
103     static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
104     {
105         return a == b;
106     }
107     
108     static const bool safeToCompareToEmptyOrDeleted = true;
109 };
110
111 } } // namespace JSC::DFG
112
113 namespace WTF {
114
115 template<typename T> struct DefaultHash;
116 template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
117     typedef JSC::DFG::ConstantBufferKeyHash Hash;
118 };
119
120 template<typename T> struct HashTraits;
121 template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
122
123 } // namespace WTF
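// With the DefaultHash and HashTraits specializations above, ConstantBufferKey can
// serve directly as a WTF::HashMap key. A minimal usage sketch (the parser keeps
// such a map, m_constantBufferCache, further below; the local names here are
// illustrative only):
//
//     HashMap<ConstantBufferKey, unsigned> cache;
//     cache.add(ConstantBufferKey(codeBlock, bufferIndex), remappedIndex);
//
// The default-constructed key (null code block, index 0) acts as the empty value
// and (null code block, index 1) as the deleted value, which is exactly what
// isHashTableDeletedValue() tests for.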
124
125 namespace JSC { namespace DFG {
126
127 // === ByteCodeParser ===
128 //
129 // This class is used to compile the dataflow graph from a CodeBlock.
130 class ByteCodeParser {
131 public:
132     ByteCodeParser(Graph& graph)
133         : m_vm(&graph.m_vm)
134         , m_codeBlock(graph.m_codeBlock)
135         , m_profiledBlock(graph.m_profiledBlock)
136         , m_graph(graph)
137         , m_currentBlock(0)
138         , m_currentIndex(0)
139         , m_constantUndefined(graph.freeze(jsUndefined()))
140         , m_constantNull(graph.freeze(jsNull()))
141         , m_constantNaN(graph.freeze(jsNumber(PNaN)))
142         , m_constantOne(graph.freeze(jsNumber(1)))
143         , m_numArguments(m_codeBlock->numParameters())
144         , m_numLocals(m_codeBlock->m_numCalleeRegisters)
145         , m_parameterSlots(0)
146         , m_numPassedVarArgs(0)
147         , m_inlineStackTop(0)
148         , m_haveBuiltOperandMaps(false)
149         , m_currentInstruction(0)
150         , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
151     {
152         ASSERT(m_profiledBlock);
153     }
154     
155     // Parse a full CodeBlock of bytecode.
156     bool parse();
157     
158 private:
159     struct InlineStackEntry;
160
161     // Just parse from m_currentIndex to the end of the current CodeBlock.
162     void parseCodeBlock();
163     
164     void ensureLocals(unsigned newNumLocals)
165     {
166         if (newNumLocals <= m_numLocals)
167             return;
168         m_numLocals = newNumLocals;
169         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
170             m_graph.block(i)->ensureLocals(newNumLocals);
171     }
172
173     // Helper for min and max.
174     template<typename ChecksFunctor>
175     bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
176     
177     // Handle calls. This resolves issues surrounding inlining and intrinsics.
178     void handleCall(
179         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
180         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
181         SpeculatedType prediction);
182     void handleCall(
183         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
184         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
185     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
186     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
187     void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
188     void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
189     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
190     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
191     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
192     bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
193     enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
194     template<typename ChecksFunctor>
195     bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
196     template<typename ChecksFunctor>
197     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
198     void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
199     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
200     template<typename ChecksFunctor>
201     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
202     template<typename ChecksFunctor>
203     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
204     template<typename ChecksFunctor>
205     bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
206     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
207     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
208     void handleGetById(
209         int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
210         const GetByIdStatus&);
211     void emitPutById(
212         Node* base, unsigned identifierNumber, Node* value,  const PutByIdStatus&, bool isDirect);
213     void handlePutById(
214         Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
215         bool isDirect);
216     void emitChecks(const ConstantStructureCheckVector&);
217
218     void prepareToParseBlock();
219     void clearCaches();
220
221     // Parse a single basic block of bytecode instructions.
222     bool parseBlock(unsigned limit);
223     // Link block successors.
224     void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
225     void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
226     
227     VariableAccessData* newVariableAccessData(VirtualRegister operand)
228     {
229         ASSERT(!operand.isConstant());
230         
231         m_graph.m_variableAccessData.append(VariableAccessData(operand));
232         return &m_graph.m_variableAccessData.last();
233     }
234     
235     // Get/Set the operands/result of a bytecode instruction.
236     Node* getDirect(VirtualRegister operand)
237     {
238         ASSERT(!operand.isConstant());
239
240         // Is this an argument?
241         if (operand.isArgument())
242             return getArgument(operand);
243
244         // Must be a local.
245         return getLocal(operand);
246     }
247
248     Node* get(VirtualRegister operand)
249     {
250         if (operand.isConstant()) {
251             unsigned constantIndex = operand.toConstantIndex();
252             unsigned oldSize = m_constants.size();
253             if (constantIndex >= oldSize || !m_constants[constantIndex]) {
254                 const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
255                 JSValue value = codeBlock.getConstant(operand.offset());
256                 SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
257                 if (constantIndex >= oldSize) {
258                     m_constants.grow(constantIndex + 1);
259                     for (unsigned i = oldSize; i < m_constants.size(); ++i)
260                         m_constants[i] = nullptr;
261                 }
262
263                 Node* constantNode = nullptr;
264                 if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
265                     constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
266                 else
267                     constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
268                 m_constants[constantIndex] = constantNode;
269             }
270             ASSERT(m_constants[constantIndex]);
271             return m_constants[constantIndex];
272         }
273         
274         if (inlineCallFrame()) {
275             if (!inlineCallFrame()->isClosureCall) {
276                 JSFunction* callee = inlineCallFrame()->calleeConstant();
277                 if (operand.offset() == JSStack::Callee)
278                     return weakJSConstant(callee);
279             }
280         } else if (operand.offset() == JSStack::Callee) {
281             // We have to do some constant-folding here because this enables CreateThis folding. Note
282             // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
283             // case if the function is a singleton then we already know it.
284             if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
285                 InferredValue* singleton = executable->singletonFunction();
286                 if (JSValue value = singleton->inferredValue()) {
287                     m_graph.watchpoints().addLazily(singleton);
288                     JSFunction* function = jsCast<JSFunction*>(value);
289                     return weakJSConstant(function);
290                 }
291             }
292             return addToGraph(GetCallee);
293         }
294         
295         return getDirect(m_inlineStackTop->remapOperand(operand));
296     }
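    // A note on the constant path above: constant nodes are materialized lazily and
    // cached in m_constants, so repeated gets of the same constant operand reuse a
    // single node, and constants whose source representation was a double become
    // DoubleConstant rather than JSConstant. Illustrative sketch (hypothetical
    // register name):
    //
    //     Node* a = get(someConstantRegister); // creates and caches the node
    //     Node* b = get(someConstantRegister); // returns the cached node
    //     ASSERT(a == b);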
297     
298     enum SetMode {
299         // A normal set which follows a two-phase commit that spans code origins. During
300         // the current code origin it issues a MovHint, and at the start of the next
301         // code origin there will be a SetLocal. If the local needs flushing, the second
302         // SetLocal will be preceded with a Flush.
303         NormalSet,
304         
305         // A set where the SetLocal happens immediately and there is still a Flush. This
306         // is relevant when assigning to a local in tricky situations for the delayed
307         // SetLocal logic but where we know that we have not performed any side effects
308         // within this code origin. This is a safe replacement for NormalSet anytime we
309         // know that we have not yet performed side effects in this code origin.
310         ImmediateSetWithFlush,
311         
312         // A set where the SetLocal happens immediately and we do not Flush it even if
313         // this is a local that is marked as needing it. This is relevant when
314         // initializing locals at the top of a function.
315         ImmediateNakedSet
316     };
317     Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
318     {
319         addToGraph(MovHint, OpInfo(operand.offset()), value);
320
321         DelayedSetLocal delayed(currentCodeOrigin(), operand, value);
322         
323         if (setMode == NormalSet) {
324             m_setLocalQueue.append(delayed);
325             return 0;
326         }
327         
328         return delayed.execute(this, setMode);
329     }
330     
331     void processSetLocalQueue()
332     {
333         for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
334             m_setLocalQueue[i].execute(this);
335         m_setLocalQueue.resize(0);
336     }
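    // A sketch of the two-phase commit described by the SetMode comments above
    // (illustrative only): under NormalSet, setDirect() emits the MovHint in the
    // current code origin and merely queues a DelayedSetLocal; the SetLocal (and any
    // required Flush) is emitted when processSetLocalQueue() runs at the start of
    // the next code origin.
    //
    //     set(destination, value);   // MovHint now; SetLocal deferred
    //     // ... advance to the next bytecode ...
    //     processSetLocalQueue();    // SetLocal (plus Flush if needed) lands here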
337
338     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
339     {
340         return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
341     }
342     
343     Node* injectLazyOperandSpeculation(Node* node)
344     {
345         ASSERT(node->op() == GetLocal);
346         ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
347         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
348         LazyOperandValueProfileKey key(m_currentIndex, node->local());
349         SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
350         node->variableAccessData()->predict(prediction);
351         return node;
352     }
353
354     // Used in implementing get/set, above, where the operand is a local variable.
355     Node* getLocal(VirtualRegister operand)
356     {
357         unsigned local = operand.toLocal();
358
359         Node* node = m_currentBlock->variablesAtTail.local(local);
360         
361         // This has two goals: 1) link together variable access datas, and 2)
362         // try to avoid creating redundant GetLocals. (1) is required for
363         // correctness - no other phase will ensure that block-local variable
364         // access data unification is done correctly. (2) is purely opportunistic
365         // and is meant as a compile-time optimization only.
366         
367         VariableAccessData* variable;
368         
369         if (node) {
370             variable = node->variableAccessData();
371             
372             switch (node->op()) {
373             case GetLocal:
374                 return node;
375             case SetLocal:
376                 return node->child1().node();
377             default:
378                 break;
379             }
380         } else
381             variable = newVariableAccessData(operand);
382         
383         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
384         m_currentBlock->variablesAtTail.local(local) = node;
385         return node;
386     }
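    // Illustrative consequence of the fast paths above (a sketch, not actual parser
    // code): re-reading a local that was just stored in this block reuses the stored
    // value instead of planting a redundant GetLocal.
    //
    //     Node* value = ...;
    //     setDirect(local, value, ImmediateSetWithFlush); // SetLocal at the block tail
    //     Node* reloaded = getLocal(local);               // returns value (the SetLocal's child1)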
387
388     Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
389     {
390         CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
391         m_currentSemanticOrigin = semanticOrigin;
392
393         unsigned local = operand.toLocal();
394         
395         if (setMode != ImmediateNakedSet) {
396             ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
397             if (argumentPosition)
398                 flushDirect(operand, argumentPosition);
399             else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
400                 flush(operand);
401         }
402
403         VariableAccessData* variableAccessData = newVariableAccessData(operand);
404         variableAccessData->mergeStructureCheckHoistingFailed(
405             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
406         variableAccessData->mergeCheckArrayHoistingFailed(
407             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
408         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
409         m_currentBlock->variablesAtTail.local(local) = node;
410
411         m_currentSemanticOrigin = oldSemanticOrigin;
412         return node;
413     }
414
415     // Used in implementing get/set, above, where the operand is an argument.
416     Node* getArgument(VirtualRegister operand)
417     {
418         unsigned argument = operand.toArgument();
419         ASSERT(argument < m_numArguments);
420         
421         Node* node = m_currentBlock->variablesAtTail.argument(argument);
422
423         VariableAccessData* variable;
424         
425         if (node) {
426             variable = node->variableAccessData();
427             
428             switch (node->op()) {
429             case GetLocal:
430                 return node;
431             case SetLocal:
432                 return node->child1().node();
433             default:
434                 break;
435             }
436         } else
437             variable = newVariableAccessData(operand);
438         
439         node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
440         m_currentBlock->variablesAtTail.argument(argument) = node;
441         return node;
442     }
443     Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
444     {
445         CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
446         m_currentSemanticOrigin = semanticOrigin;
447
448         unsigned argument = operand.toArgument();
449         ASSERT(argument < m_numArguments);
450         
451         VariableAccessData* variableAccessData = newVariableAccessData(operand);
452
453         // Always flush arguments, except for 'this'. If 'this' is created by us,
454         // then make sure that it's never unboxed.
455         if (argument) {
456             if (setMode != ImmediateNakedSet)
457                 flushDirect(operand);
458         } else if (m_codeBlock->specializationKind() == CodeForConstruct)
459             variableAccessData->mergeShouldNeverUnbox(true);
460         
461         variableAccessData->mergeStructureCheckHoistingFailed(
462             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
463         variableAccessData->mergeCheckArrayHoistingFailed(
464             m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
465         Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
466         m_currentBlock->variablesAtTail.argument(argument) = node;
467
468         m_currentSemanticOrigin = oldSemanticOrigin;
469         return node;
470     }
471     
472     ArgumentPosition* findArgumentPositionForArgument(int argument)
473     {
474         InlineStackEntry* stack = m_inlineStackTop;
475         while (stack->m_inlineCallFrame)
476             stack = stack->m_caller;
477         return stack->m_argumentPositions[argument];
478     }
479     
480     ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
481     {
482         for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
483             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
484             if (!inlineCallFrame)
485                 break;
486             if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
487                 continue;
488             if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
489                 continue;
490             if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
491                 continue;
492             int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
493             return stack->m_argumentPositions[argument];
494         }
495         return 0;
496     }
497     
498     ArgumentPosition* findArgumentPosition(VirtualRegister operand)
499     {
500         if (operand.isArgument())
501             return findArgumentPositionForArgument(operand.toArgument());
502         return findArgumentPositionForLocal(operand);
503     }
504
505     void flush(VirtualRegister operand)
506     {
507         flushDirect(m_inlineStackTop->remapOperand(operand));
508     }
509     
510     void flushDirect(VirtualRegister operand)
511     {
512         flushDirect(operand, findArgumentPosition(operand));
513     }
514     
515     void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
516     {
517         ASSERT(!operand.isConstant());
518         
519         Node* node = m_currentBlock->variablesAtTail.operand(operand);
520         
521         VariableAccessData* variable;
522         
523         if (node)
524             variable = node->variableAccessData();
525         else
526             variable = newVariableAccessData(operand);
527         
528         node = addToGraph(Flush, OpInfo(variable));
529         m_currentBlock->variablesAtTail.operand(operand) = node;
530         if (argumentPosition)
531             argumentPosition->addVariable(variable);
532     }
533     
534     void flush(InlineStackEntry* inlineStackEntry)
535     {
536         int numArguments;
537         if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
538             ASSERT(!m_hasDebuggerEnabled);
539             numArguments = inlineCallFrame->arguments.size();
540             if (inlineCallFrame->isClosureCall)
541                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
542             if (inlineCallFrame->isVarargs())
543                 flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
544         } else
545             numArguments = inlineStackEntry->m_codeBlock->numParameters();
546         for (unsigned argument = numArguments; argument-- > 1;)
547             flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
548         if (m_hasDebuggerEnabled)
549             flush(m_codeBlock->scopeRegister());
550     }
551
552     void flushForTerminal()
553     {
554         for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
555             flush(inlineStackEntry);
556     }
557
558     void flushForReturn()
559     {
560         flush(m_inlineStackTop);
561     }
562     
563     void flushIfTerminal(SwitchData& data)
564     {
565         if (data.fallThrough.bytecodeIndex() > m_currentIndex)
566             return;
567         
568         for (unsigned i = data.cases.size(); i--;) {
569             if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
570                 return;
571         }
572         
573         flushForTerminal();
574     }
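    // Flushing, in summary: flush()/flushDirect() pin one operand by appending a
    // Flush node at the block tail; flush(InlineStackEntry*) flushes that frame's
    // arguments (plus the callee for closure calls, the argument count for varargs
    // calls, and the scope register when the debugger is on); flushForReturn() does
    // this for the current frame only, while flushForTerminal() walks the whole
    // inline stack because a terminal can exit into any caller. flushIfTerminal()
    // treats a switch whose fall-through and case targets all jump backwards as such
    // a terminal.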
575
576     // Assumes that the constant should be strongly marked.
577     Node* jsConstant(JSValue constantValue)
578     {
579         return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
580     }
581
582     Node* weakJSConstant(JSValue constantValue)
583     {
584         return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
585     }
586
587     // Helper functions to get/set the this value.
588     Node* getThis()
589     {
590         return get(m_inlineStackTop->m_codeBlock->thisRegister());
591     }
592
593     void setThis(Node* value)
594     {
595         set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
596     }
597
598     InlineCallFrame* inlineCallFrame()
599     {
600         return m_inlineStackTop->m_inlineCallFrame;
601     }
602
603     CodeOrigin currentCodeOrigin()
604     {
605         return CodeOrigin(m_currentIndex, inlineCallFrame());
606     }
607
608     NodeOrigin currentNodeOrigin()
609     {
610         // FIXME: We should set the forExit origin only on those nodes that can exit.
611         // https://bugs.webkit.org/show_bug.cgi?id=145204
612         if (m_currentSemanticOrigin.isSet())
613             return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin());
614         return NodeOrigin(currentCodeOrigin());
615     }
616     
617     BranchData* branchData(unsigned taken, unsigned notTaken)
618     {
619         // We assume that branches originating from bytecode always have a fall-through. We
620         // use this assumption to avoid checking for the creation of terminal blocks.
621         ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
622         BranchData* data = m_graph.m_branchData.add();
623         *data = BranchData::withBytecodeIndices(taken, notTaken);
624         return data;
625     }
626     
627     Node* addToGraph(Node* node)
628     {
629         if (Options::verboseDFGByteCodeParsing())
630             dataLog("        appended ", node, " ", Graph::opName(node->op()), "\n");
631         m_currentBlock->append(node);
632         return node;
633     }
634     
635     Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
636     {
637         Node* result = m_graph.addNode(
638             SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
639             Edge(child3));
640         return addToGraph(result);
641     }
642     Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
643     {
644         Node* result = m_graph.addNode(
645             SpecNone, op, currentNodeOrigin(), child1, child2, child3);
646         return addToGraph(result);
647     }
648     Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
649     {
650         Node* result = m_graph.addNode(
651             SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
652             Edge(child3));
653         return addToGraph(result);
654     }
655     Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
656     {
657         Node* result = m_graph.addNode(
658             SpecNone, op, currentNodeOrigin(), info1, info2,
659             Edge(child1), Edge(child2), Edge(child3));
660         return addToGraph(result);
661     }
662     
663     Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
664     {
665         Node* result = m_graph.addNode(
666             SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
667             m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
668         addToGraph(result);
669         
670         m_numPassedVarArgs = 0;
671         
672         return result;
673     }
674     
675     void addVarArgChild(Node* child)
676     {
677         m_graph.m_varArgChildren.append(Edge(child));
678         m_numPassedVarArgs++;
679     }
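    // The var-arg node protocol, as used by addCallWithoutSettingResult() below
    // (a sketch): push each child with addVarArgChild(), then build the node, which
    // adopts every child pushed since the previous var-arg node.
    //
    //     addVarArgChild(callee);
    //     for (int i = 0; i < argCount; ++i)
    //         addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
    //     Node* call = addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));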
680     
681     Node* addCallWithoutSettingResult(
682         NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
683         SpeculatedType prediction)
684     {
685         addVarArgChild(callee);
686         size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
687         if (parameterSlots > m_parameterSlots)
688             m_parameterSlots = parameterSlots;
689
690         for (int i = 0; i < argCount; ++i)
691             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
692
693         return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
694     }
695     
696     Node* addCall(
697         int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
698         SpeculatedType prediction)
699     {
700         Node* call = addCallWithoutSettingResult(
701             op, opInfo, callee, argCount, registerOffset, prediction);
702         VirtualRegister resultReg(result);
703         if (resultReg.isValid())
704             set(resultReg, call);
705         return call;
706     }
707     
708     Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
709     {
710         Node* objectNode = weakJSConstant(object);
711         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
712         return objectNode;
713     }
714     
715     SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
716     {
717         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
718         return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
719     }
720
721     SpeculatedType getPrediction(unsigned bytecodeIndex)
722     {
723         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
724         
725         if (prediction == SpecNone) {
726             // We have no information about what values this node generates. Give up
727             // on executing this code, since we're likely to do more damage than good.
728             addToGraph(ForceOSRExit);
729         }
730         
731         return prediction;
732     }
733     
734     SpeculatedType getPredictionWithoutOSRExit()
735     {
736         return getPredictionWithoutOSRExit(m_currentIndex);
737     }
738     
739     SpeculatedType getPrediction()
740     {
741         return getPrediction(m_currentIndex);
742     }
743     
744     ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
745     {
746         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
747         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
748         return ArrayMode::fromObserved(locker, profile, action, false);
749     }
750     
751     ArrayMode getArrayMode(ArrayProfile* profile)
752     {
753         return getArrayMode(profile, Array::Read);
754     }
755     
756     ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
757     {
758         ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
759         
760         profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
761         
762         bool makeSafe =
763             m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
764             || profile->outOfBounds(locker);
765         
766         ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
767         
768         return result;
769     }
770     
771     Node* makeSafe(Node* node)
772     {
773         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
774             node->mergeFlags(NodeMayOverflowInDFG);
775         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
776             node->mergeFlags(NodeMayNegZeroInDFG);
777         
778         if (!isX86() && node->op() == ArithMod)
779             return node;
780
781         if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
782             return node;
783         
784         switch (node->op()) {
785         case UInt32ToNumber:
786         case ArithAdd:
787         case ArithSub:
788         case ValueAdd:
789         case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
790             node->mergeFlags(NodeMayOverflowInBaseline);
791             break;
792             
793         case ArithNegate:
794             // Currently we can't tell the difference between a negation overflowing
795             // (i.e. -(1 << 31)) and generating negative zero (i.e. -0). If it took the slow
796             // path then we assume that it did both of those things.
797             node->mergeFlags(NodeMayOverflowInBaseline);
798             node->mergeFlags(NodeMayNegZeroInBaseline);
799             break;
800
801         case ArithMul:
802             // FIXME: We should detect cases where we only overflowed but never created
803             // negative zero.
804             // https://bugs.webkit.org/show_bug.cgi?id=132470
805             if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
806                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
807                 node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
808             else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
809                 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
810                 node->mergeFlags(NodeMayNegZeroInBaseline);
811             break;
812             
813         default:
814             RELEASE_ASSERT_NOT_REACHED();
815             break;
816         }
817         
818         return node;
819     }
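    // Typical call pattern (a hedged sketch; the arithmetic opcode handlers later in
    // this file wrap their nodes roughly like this so that baseline profiling flags
    // such as NodeMayOverflowInBaseline end up on the node):
    //
    //     set(VirtualRegister(dst), makeSafe(addToGraph(ArithAdd, op1, op2)));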
820     
821     Node* makeDivSafe(Node* node)
822     {
823         ASSERT(node->op() == ArithDiv);
824         
825         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
826             node->mergeFlags(NodeMayOverflowInDFG);
827         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
828             node->mergeFlags(NodeMayNegZeroInDFG);
829         
830         // The main slow case counter for op_div in the old JIT counts only when
831         // the operands are not numbers. We don't care about that since we already
832         // have speculations in place that take care of that separately. We only
833         // care about when the outcome of the division is not an integer, which
834         // is what the special fast case counter tells us.
835         
836         if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
837             return node;
838         
839         // FIXME: It might be possible to make this more granular.
840         node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
841         
842         return node;
843     }
844     
845     void noticeArgumentsUse()
846     {
847         // All of the arguments in this function need to be formatted as JSValues because we will
848         // load from them in a random-access fashion and we don't want to have to switch on
849         // format.
850         
851         for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
852             argument->mergeShouldNeverUnbox(true);
853     }
854     
855     void buildOperandMapsIfNecessary();
856     
857     VM* m_vm;
858     CodeBlock* m_codeBlock;
859     CodeBlock* m_profiledBlock;
860     Graph& m_graph;
861
862     // The current block being generated.
863     BasicBlock* m_currentBlock;
864     // The bytecode index of the current instruction being generated.
865     unsigned m_currentIndex;
866     // The semantic origin of the current node, if different from the current index.
867     CodeOrigin m_currentSemanticOrigin;
868
869     FrozenValue* m_constantUndefined;
870     FrozenValue* m_constantNull;
871     FrozenValue* m_constantNaN;
872     FrozenValue* m_constantOne;
873     Vector<Node*, 16> m_constants;
874
875     // The number of arguments passed to the function.
876     unsigned m_numArguments;
877     // The number of locals (vars + temporaries) used in the function.
878     unsigned m_numLocals;
879     // The number of slots (in units of sizeof(Register)) that we need to
880     // preallocate for arguments to outgoing calls from this frame. This
881     // number includes the CallFrame slots that we initialize for the callee
882     // (but not the callee-initialized CallerFrame and ReturnPC slots).
883     // This number is 0 if and only if this function is a leaf.
884     unsigned m_parameterSlots;
885     // The number of var args passed to the next var arg node.
886     unsigned m_numPassedVarArgs;
887
888     HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
889     
890     struct InlineStackEntry {
891         ByteCodeParser* m_byteCodeParser;
892         
893         CodeBlock* m_codeBlock;
894         CodeBlock* m_profiledBlock;
895         InlineCallFrame* m_inlineCallFrame;
896         
897         ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
898         
899         QueryableExitProfile m_exitProfile;
900         
901         // Remapping of identifier and constant numbers from the code block being
902         // inlined (inline callee) to the code block that we're inlining into
903         // (the machine code block, which is the transitive, though not necessarily
904         // direct, caller).
905         Vector<unsigned> m_identifierRemap;
906         Vector<unsigned> m_constantBufferRemap;
907         Vector<unsigned> m_switchRemap;
908         
909         // Blocks introduced by this code block, which need successor linking.
910         // May include up to one basic block that includes the continuation after
911         // the callsite in the caller. These must be appended in the order that they
912         // are created, but their bytecodeBegin values need not be in order as they
913         // are ignored.
914         Vector<UnlinkedBlock> m_unlinkedBlocks;
915         
916         // Potential block linking targets. Must be sorted by bytecodeBegin, and
917         // cannot have two blocks that have the same bytecodeBegin.
918         Vector<BasicBlock*> m_blockLinkingTargets;
919         
920         // If the callsite's basic block was split into two, then this will be
921         // the head of the callsite block. It needs its successors linked to the
922         // m_unlinkedBlocks, but not the other way around: there's no way for
923         // any blocks in m_unlinkedBlocks to jump back into this block.
924         BasicBlock* m_callsiteBlockHead;
925         
926         // Does the callsite block head need linking? This is typically true
927         // but will be false for the machine code block's inline stack entry
928         // (since that one is not inlined) and for cases where an inline callee
929         // did the linking for us.
930         bool m_callsiteBlockHeadNeedsLinking;
931         
932         VirtualRegister m_returnValue;
933         
934         // Speculations about variable types collected from the profiled code block,
935         // which are based on OSR exit profiles that past DFG compilations of this
936         // code block had gathered.
937         LazyOperandValueProfileParser m_lazyOperands;
938         
939         CallLinkInfoMap m_callLinkInfos;
940         StubInfoMap m_stubInfos;
941         
942         // Did we see any returns? We need to handle the (uncommon but necessary)
943         // case where a procedure that does not return was inlined.
944         bool m_didReturn;
945         
946         // Did we have any early returns?
947         bool m_didEarlyReturn;
948         
949         // Pointers to the argument position trackers for this slice of code.
950         Vector<ArgumentPosition*> m_argumentPositions;
951         
952         InlineStackEntry* m_caller;
953         
954         InlineStackEntry(
955             ByteCodeParser*,
956             CodeBlock*,
957             CodeBlock* profiledBlock,
958             BasicBlock* callsiteBlockHead,
959             JSFunction* callee, // Null if this is a closure call.
960             VirtualRegister returnValueVR,
961             VirtualRegister inlineCallFrameStart,
962             int argumentCountIncludingThis,
963             InlineCallFrame::Kind);
964         
965         ~InlineStackEntry()
966         {
967             m_byteCodeParser->m_inlineStackTop = m_caller;
968         }
969         
970         VirtualRegister remapOperand(VirtualRegister operand) const
971         {
972             if (!m_inlineCallFrame)
973                 return operand;
974             
975             ASSERT(!operand.isConstant());
976
977             return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
978         }
979     };
980     
981     InlineStackEntry* m_inlineStackTop;
982     
983     struct DelayedSetLocal {
984         CodeOrigin m_origin;
985         VirtualRegister m_operand;
986         Node* m_value;
987         
988         DelayedSetLocal() { }
989         DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
990             : m_origin(origin)
991             , m_operand(operand)
992             , m_value(value)
993         {
994         }
995         
996         Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
997         {
998             if (m_operand.isArgument())
999                 return parser->setArgument(m_origin, m_operand, m_value, setMode);
1000             return parser->setLocal(m_origin, m_operand, m_value, setMode);
1001         }
1002     };
1003     
1004     Vector<DelayedSetLocal, 2> m_setLocalQueue;
1005
1006     // Have we built operand maps? We initialize them lazily, and only when doing
1007     // inlining.
1008     bool m_haveBuiltOperandMaps;
1009     // Mapping between identifier names and numbers.
1010     BorrowedIdentifierMap m_identifierMap;
1011     
1012     CodeBlock* m_dfgCodeBlock;
1013     CallLinkStatus::ContextMap m_callContextMap;
1014     StubInfoMap m_dfgStubInfos;
1015     
1016     Instruction* m_currentInstruction;
1017     bool m_hasDebuggerEnabled;
1018 };
1019
1020 #define NEXT_OPCODE(name) \
1021     m_currentIndex += OPCODE_LENGTH(name); \
1022     continue
1023
1024 #define LAST_OPCODE(name) \
1025     m_currentIndex += OPCODE_LENGTH(name); \
1026     return shouldContinueParsing
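// These macros are meant for the opcode dispatch loop in parseBlock(): a handler
// for a non-terminal opcode ends with NEXT_OPCODE(op_foo) to advance m_currentIndex
// and continue the loop, while a handler for a block-terminating opcode ends with
// LAST_OPCODE(op_foo) to advance and return whether parsing of the enclosing code
// block should continue. (op_foo is a placeholder, not a real opcode.)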
1027
1028 void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1029 {
1030     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
1031     handleCall(
1032         pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
1033         pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
1034 }
1035
1036 void ByteCodeParser::handleCall(
1037     int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
1038     int callee, int argumentCountIncludingThis, int registerOffset)
1039 {
1040     Node* callTarget = get(VirtualRegister(callee));
1041     
1042     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1043         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1044         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1045     
1046     handleCall(
1047         result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
1048         argumentCountIncludingThis, registerOffset, callLinkStatus);
1049 }
1050     
1051 void ByteCodeParser::handleCall(
1052     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1053     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1054     CallLinkStatus callLinkStatus)
1055 {
1056     handleCall(
1057         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
1058         registerOffset, callLinkStatus, getPrediction());
1059 }
1060
1061 void ByteCodeParser::handleCall(
1062     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
1063     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
1064     CallLinkStatus callLinkStatus, SpeculatedType prediction)
1065 {
1066     ASSERT(registerOffset <= 0);
1067     
1068     if (callTarget->isCellConstant())
1069         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1070     
1071     if (Options::verboseDFGByteCodeParsing())
1072         dataLog("    Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1073     
1074     if (!callLinkStatus.canOptimize()) {
1075         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
1076         // that we cannot optimize them.
1077         
1078         addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
1079         return;
1080     }
1081     
1082     unsigned nextOffset = m_currentIndex + instructionSize;
1083     
1084     OpInfo callOpInfo;
1085     
1086     if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
1087         if (m_graph.compilation())
1088             m_graph.compilation()->noticeInlinedCall();
1089         return;
1090     }
1091     
1092 #if ENABLE(FTL_NATIVE_CALL_INLINING)
1093     if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
1094         CallVariant callee = callLinkStatus[0];
1095         JSFunction* function = callee.function();
1096         CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1097         if (function && function->isHostFunction()) {
1098             emitFunctionChecks(callee, callTarget, virtualRegisterForArgument(0, registerOffset));
1099             callOpInfo = OpInfo(m_graph.freeze(function));
1100
1101             if (op == Call)
1102                 op = NativeCall;
1103             else {
1104                 ASSERT(op == Construct);
1105                 op = NativeConstruct;
1106             }
1107         }
1108     }
1109 #endif
1110     
1111     addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
1112 }
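// In summary, the overload above picks among four outcomes: if the CallLinkStatus
// says the site cannot be optimized, plant a plain Call/Construct node; otherwise
// try handleInlining(); failing that, on FTL builds with native call inlining
// enabled, a monomorphic host-function callee may be lowered to NativeCall or
// NativeConstruct; and as the final fallback, plant the ordinary call node with
// whatever OpInfo was computed along the way.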
1113
1114 void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
1115 {
1116     ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
1117     
1118     int result = pc[1].u.operand;
1119     int callee = pc[2].u.operand;
1120     int thisReg = pc[3].u.operand;
1121     int arguments = pc[4].u.operand;
1122     int firstFreeReg = pc[5].u.operand;
1123     int firstVarArgOffset = pc[6].u.operand;
1124     
1125     SpeculatedType prediction = getPrediction();
1126     
1127     Node* callTarget = get(VirtualRegister(callee));
1128     
1129     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
1130         m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
1131         m_inlineStackTop->m_callLinkInfos, m_callContextMap);
1132     if (callTarget->isCellConstant())
1133         callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));
1134     
1135     if (Options::verboseDFGByteCodeParsing())
1136         dataLog("    Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
1137     
1138     if (callLinkStatus.canOptimize()
1139         && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
1140         if (m_graph.compilation())
1141             m_graph.compilation()->noticeInlinedCall();
1142         return;
1143     }
1144     
1145     CallVarargsData* data = m_graph.m_callVarargsData.add();
1146     data->firstVarArgOffset = firstVarArgOffset;
1147     
1148     Node* thisChild = get(VirtualRegister(thisReg));
1149     
1150     Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
1151     VirtualRegister resultReg(result);
1152     if (resultReg.isValid())
1153         set(resultReg, call);
1154 }
1155
1156 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
1157 {
1158     Node* thisArgument;
1159     if (thisArgumentReg.isValid())
1160         thisArgument = get(thisArgumentReg);
1161     else
1162         thisArgument = 0;
1163
1164     JSCell* calleeCell;
1165     Node* callTargetForCheck;
1166     if (callee.isClosureCall()) {
1167         calleeCell = callee.executable();
1168         callTargetForCheck = addToGraph(GetExecutable, callTarget);
1169     } else {
1170         calleeCell = callee.nonExecutableCallee();
1171         callTargetForCheck = callTarget;
1172     }
1173     
1174     ASSERT(calleeCell);
1175     addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
1176 }
1177
1178 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
1179 {
1180     for (int i = 0; i < argumentCountIncludingThis; ++i)
1181         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
1182 }
1183
1184 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
1185 {
1186     if (verbose)
1187         dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
1188     
1189     if (m_hasDebuggerEnabled) {
1190         if (verbose)
1191             dataLog("    Failing because the debugger is in use.\n");
1192         return UINT_MAX;
1193     }
1194
1195     FunctionExecutable* executable = callee.functionExecutable();
1196     if (!executable) {
1197         if (verbose)
1198             dataLog("    Failing because there is no function executable.\n");
1199         return UINT_MAX;
1200     }
1201     
1202     // Does the number of arguments we're passing match the arity of the target? We currently
1203     // inline only if the number of arguments passed is greater than or equal to the number
1204     // of arguments expected.
1205     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
1206         if (verbose)
1207             dataLog("    Failing because of arity mismatch.\n");
1208         return UINT_MAX;
1209     }
1210     
1211     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1212     // being an inline candidate? We might not have a code block (1) if code was thrown away,
1213     // (2) if we simply hadn't actually made this call yet, or (3) if the code is a builtin function and
1214     // the specialization kind is construct. In the first two cases, we could still theoretically attempt
1215     // to inline it if we had a static proof of what was being called; this might happen for example
1216     // if you call a global function, where watchpointing gives us static information. Overall,
1217     // it's a rare case because we expect that any hot callees would have already been compiled.
1218     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1219     if (!codeBlock) {
1220         if (verbose)
1221             dataLog("    Failing because no code block available.\n");
1222         return UINT_MAX;
1223     }
1224     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
1225         codeBlock, kind, callee.isClosureCall());
1226     if (verbose) {
1227         dataLog("    Kind: ", kind, "\n");
1228         dataLog("    Is closure call: ", callee.isClosureCall(), "\n");
1229         dataLog("    Capability level: ", capabilityLevel, "\n");
1230         dataLog("    Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
1231         dataLog("    Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
1232         dataLog("    Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
1233         dataLog("    Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n");
1234         dataLog("    Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
1235     }
1236     if (!canInline(capabilityLevel)) {
1237         if (verbose)
1238             dataLog("    Failing because the function is not inlineable.\n");
1239         return UINT_MAX;
1240     }
1241     
1242     // Check if the caller is already too large. We do this check here because that's just
1243     // where we happen to also have the callee's code block, and we want that for the
1244     // purpose of unsetting SABI.
1245     if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
1246         codeBlock->m_shouldAlwaysBeInlined = false;
1247         if (verbose)
1248             dataLog("    Failing because the caller is too large.\n");
1249         return UINT_MAX;
1250     }
1251     
1252     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
1253     // this function.
1254     // https://bugs.webkit.org/show_bug.cgi?id=127627
1255     
1256     // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
1257     // too many levels? If either of these is detected, then don't inline. We adjust our
1258     // heuristics if we are dealing with a function that cannot otherwise be compiled.
1259     
1260     unsigned depth = 0;
1261     unsigned recursion = 0;
1262     
1263     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1264         ++depth;
1265         if (depth >= Options::maximumInliningDepth()) {
1266             if (verbose)
1267                 dataLog("    Failing because depth exceeded.\n");
1268             return UINT_MAX;
1269         }
1270         
1271         if (entry->executable() == executable) {
1272             ++recursion;
1273             if (recursion >= Options::maximumInliningRecursion()) {
1274                 if (verbose)
1275                     dataLog("    Failing because recursion detected.\n");
1276                 return UINT_MAX;
1277             }
1278         }
1279     }
1280     
1281     if (verbose)
1282         dataLog("    Inlining should be possible.\n");
1283     
1284     // It might be possible to inline.
1285     return codeBlock->instructionCount();
1286 }
1287
1288 template<typename ChecksFunctor>
1289 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
1290 {
1291     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1292     
1293     ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
1294     
1295     CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
1296     insertChecks(codeBlock);
1297
1298     // FIXME: Don't flush constants!
1299     
1300     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
1301     
1302     ensureLocals(
1303         VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
1304         JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
1305     
1306     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1307
1308     VirtualRegister resultReg(resultOperand);
1309     if (resultReg.isValid())
1310         resultReg = m_inlineStackTop->remapOperand(resultReg);
1311     
1312     InlineStackEntry inlineStackEntry(
1313         this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
1314         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
1315     
1316     // This is where the actual inlining really happens.
1317     unsigned oldIndex = m_currentIndex;
1318     m_currentIndex = 0;
1319
1320     InlineVariableData inlineVariableData;
1321     inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
1322     inlineVariableData.argumentPositionStart = argumentPositionStart;
1323     inlineVariableData.calleeVariable = 0;
1324     
1325     RELEASE_ASSERT(
1326         m_inlineStackTop->m_inlineCallFrame->isClosureCall
1327         == callee.isClosureCall());
1328     if (callee.isClosureCall()) {
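             // For a closure call the callee object is not a compile-time constant, so store the
             // call target into the inlined frame's Callee slot; the inlined code will read it
             // from there.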
1329         VariableAccessData* calleeVariable =
1330             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
1331         
1332         calleeVariable->mergeShouldNeverUnbox(true);
1333         
1334         inlineVariableData.calleeVariable = calleeVariable;
1335     }
1336     
1337     m_graph.m_inlineVariableData.append(inlineVariableData);
1338     
1339     parseCodeBlock();
1340     clearCaches(); // Reset our state now that we're back to the outer code.
1341     
1342     m_currentIndex = oldIndex;
1343     
1344     // If the inlined code created some new basic blocks, then we have linking to do.
1345     if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
1346         
1347         ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
1348         if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
1349             linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
1350         else
1351             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
1352         
1353         if (callerLinkability == CallerDoesNormalLinking)
1354             cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
1355         
1356         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
1357     } else
1358         ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
1359     
1360     BasicBlock* lastBlock = m_graph.lastBlock();
1361     // If there was a return, but no early returns, then we're done. We allow parsing of
1362     // the caller to continue in whatever basic block we're in right now.
1363     if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
1364         if (Options::verboseDFGByteCodeParsing())
1365             dataLog("    Allowing parsing to continue in last inlined block.\n");
1366         
1367         ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());
1368         
1369         // If we created new blocks then the last block needs linking, but in the
1370         // caller. It doesn't need to be linked to, but it needs outgoing links.
1371         if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
1372             // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
1373             // for release builds because this block will never serve as a potential target
1374             // in the linker's binary search.
1375             if (Options::verboseDFGByteCodeParsing())
1376                 dataLog("        Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
1377             lastBlock->bytecodeBegin = m_currentIndex;
1378             if (callerLinkability == CallerDoesNormalLinking) {
1379                 if (verbose)
1380                     dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
1381                 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
1382             }
1383         }
1384         
1385         m_currentBlock = m_graph.lastBlock();
1386         return;
1387     }
1388     
1389     if (Options::verboseDFGByteCodeParsing())
1390         dataLog("    Creating new block after inlining.\n");
1391
1392     // If we get to this point then all blocks must end in some sort of terminal.
1393     ASSERT(lastBlock->terminal());
1394
1395     // Need to create a new basic block for the continuation at the caller.
1396     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
1397
1398     // Link the early returns to the basic block we're about to create.
1399     for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
1400         if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
1401             continue;
1402         BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
1403         ASSERT(!blockToLink->isLinked);
1404         Node* node = blockToLink->terminal();
1405         ASSERT(node->op() == Jump);
1406         ASSERT(!node->targetBlock());
1407         node->targetBlock() = block.get();
1408         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
1409         if (verbose)
1410             dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
1411         blockToLink->didLink();
1412     }
1413     
1414     m_currentBlock = block.get();
1415     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
1416     if (verbose)
1417         dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
1418     if (callerLinkability == CallerDoesNormalLinking) {
1419         m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
1420         m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
1421     }
1422     m_graph.appendBlock(block);
1423     prepareToParseBlock();
1424 }
1425
1426 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
1427 {
1428     // It's possible that the callsite block head is not owned by the caller.
1429     if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
1430         // It's definitely owned by the caller, because the caller created new blocks.
1431         // Assert that this all adds up.
1432         ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
1433         ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
1434         inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
1435     } else {
1436         // It's definitely not owned by the caller. Tell the caller that it does not
1437         // need to link its callsite block head, because we already did it.
1438         ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
1439         ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
1440         inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
1441     }
1442 }
1443
1444 template<typename ChecksFunctor>
1445 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
1446 {
1447     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1448     
1449     if (!inliningBalance)
1450         return false;
1451     
1452     bool didInsertChecks = false;
1453     auto insertChecksWithAccounting = [&] () {
1454         insertChecks(nullptr);
1455         didInsertChecks = true;
1456     };
1457     
1458     if (verbose)
1459         dataLog("    Considering callee ", callee, "\n");
1460     
1461     // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
1462     // we currently don't have any way of getting profiling information for arguments to non-JS varargs
1463     // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
1464     // and there are no callsite value profiles, and native functions won't have callee value profiles for
1465     // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
1466     // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
1467     // calling LoadVarargs twice.
1468     if (!InlineCallFrame::isVarargs(kind)) {
1469         if (InternalFunction* function = callee.internalFunction()) {
1470             if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
1471                 RELEASE_ASSERT(didInsertChecks);
1472                 addToGraph(Phantom, callTargetNode);
1473                 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1474                 inliningBalance--;
1475                 return true;
1476             }
1477             RELEASE_ASSERT(!didInsertChecks);
1478             return false;
1479         }
1480     
1481         Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
1482         if (intrinsic != NoIntrinsic) {
1483             if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
1484                 RELEASE_ASSERT(didInsertChecks);
1485                 addToGraph(Phantom, callTargetNode);
1486                 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1487                 inliningBalance--;
1488                 return true;
1489             }
1490             RELEASE_ASSERT(!didInsertChecks);
1491             return false;
1492         }
1493     }
1494     
1495     unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
1496     if (myInliningCost > inliningBalance)
1497         return false;
1498
1499     inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
1500     inliningBalance -= myInliningCost;
1501     return true;
1502 }
1503
1504 bool ByteCodeParser::handleInlining(
1505     Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,
1506     int registerOffsetOrFirstFreeReg, VirtualRegister thisArgument,
1507     VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis,
1508     unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
1509 {
1510     if (verbose) {
1511         dataLog("Handling inlining...\n");
1512         dataLog("Stack: ", currentCodeOrigin(), "\n");
1513     }
1514     CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
1515     
1516     if (!callLinkStatus.size()) {
1517         if (verbose)
1518             dataLog("Bailing inlining.\n");
1519         return false;
1520     }
1521     
1522     if (InlineCallFrame::isVarargs(kind)
1523         && callLinkStatus.maxNumArguments() > Options::maximumVarargsForInlining()) {
1524         if (verbose)
1525             dataLog("Bailing inlining because of varargs.\n");
1526         return false;
1527     }
1528         
1529     unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
1530     if (specializationKind == CodeForConstruct)
1531         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1532     if (callLinkStatus.isClosureCall())
1533         inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
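         // The balance is an instruction-count budget shared by every callee we might inline at
         // this call site: attemptToInlineCall() subtracts each inlinee's inliningCost() from it,
         // so later (less frequent) callees only get whatever budget remains.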
1534     
1535     // First check if we can avoid creating control flow. Our inliner does some CFG
1536     // simplification on the fly and this helps reduce compile times, but we can only leverage
1537     // this in cases where we don't need control flow diamonds to check the callee.
1538     if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
1539         int registerOffset;
1540         
1541         // Only used for varargs calls.
1542         unsigned mandatoryMinimum = 0;
1543         unsigned maxNumArguments = 0;
1544
1545         if (InlineCallFrame::isVarargs(kind)) {
1546             if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
1547                 mandatoryMinimum = functionExecutable->parameterCount();
1548             else
1549                 mandatoryMinimum = 0;
1550             
1551             // includes "this"
1552             maxNumArguments = std::max(
1553                 callLinkStatus.maxNumArguments(),
1554                 mandatoryMinimum + 1);
1555             
1556             // We sort of pretend that this *is* the number of arguments that were passed.
1557             argumentCountIncludingThis = maxNumArguments;
1558             
1559             registerOffset = registerOffsetOrFirstFreeReg + 1;
1560             registerOffset -= maxNumArguments; // includes "this"
1561             registerOffset -= JSStack::CallFrameHeaderSize;
1562             registerOffset = -WTF::roundUpToMultipleOf(
1563                 stackAlignmentRegisters(),
1564                 -registerOffset);
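                 // Rounding the negated offset up to a multiple of stackAlignmentRegisters() pushes
                 // the frame start down to an alignment boundary; e.g. with 2 alignment registers
                 // (hypothetical), an offset of -13 becomes -14.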
1565         } else
1566             registerOffset = registerOffsetOrFirstFreeReg;
1567         
1568         bool result = attemptToInlineCall(
1569             callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
1570             argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
1571             inliningBalance, [&] (CodeBlock* codeBlock) {
1572                 emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
1573
1574                 // If we have a varargs call, we want to extract the arguments right now.
1575                 if (InlineCallFrame::isVarargs(kind)) {
1576                     int remappedRegisterOffset =
1577                         m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();
1578                     
1579                     ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());
1580                     
1581                     int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
1582                     int remappedArgumentStart =
1583                         m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();
1584
1585                     LoadVarargsData* data = m_graph.m_loadVarargsData.add();
1586                     data->start = VirtualRegister(remappedArgumentStart + 1);
1587                     data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
1588                     data->offset = argumentsOffset;
1589                     data->limit = maxNumArguments;
1590                     data->mandatoryMinimum = mandatoryMinimum;
1591             
1592                     addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
1593
1594                     // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
1595                     // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
1596                     // callTargetNode because the other 2 are still in use and alive at this point.
1597                     addToGraph(Phantom, callTargetNode);
1598
1599                     // In DFG IR before SSA, we cannot insert control flow between the
1600                     // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
1601                     // SSA. Fortunately, we also have other reasons for not inserting control flow
1602                     // before SSA.
1603             
1604                     VariableAccessData* countVariable = newVariableAccessData(
1605                         VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
1606                     // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
1607                     // matter very much, since our use of a SetArgument and Flushes for this local slot is
1608                     // mostly just a formality.
1609                     countVariable->predict(SpecInt32);
1610                     countVariable->mergeIsProfitableToUnbox(true);
1611                     Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
1612                     m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);
1613
1614                     set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
1615                     for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
1616                         VariableAccessData* variable = newVariableAccessData(
1617                             VirtualRegister(remappedArgumentStart + argument));
1618                         variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.
1619                         
1620                         // For a while it had been my intention to do things like this inside the
1621                         // prediction injection phase. But in this case it's really best to do it here,
1622                         // because it's here that we have access to the variable access datas for the
1623                         // inlining we're about to do.
1624                         //
1625                         // Something else that's interesting here is that we'd really love to get
1626                         // predictions from the arguments loaded at the callsite, rather than the
1627                         // arguments received inside the callee. But that probably won't matter for most
1628                         // calls.
1629                         if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
1630                             ConcurrentJITLocker locker(codeBlock->m_lock);
1631                             if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
1632                                 variable->predict(profile->computeUpdatedPrediction(locker));
1633                         }
1634                         
1635                         Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
1636                         m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
1637                     }
1638                 }
1639             });
1640         if (verbose) {
1641             dataLog("Done inlining (simple).\n");
1642             dataLog("Stack: ", currentCodeOrigin(), "\n");
1643             dataLog("Result: ", result, "\n");
1644         }
1645         return result;
1646     }
1647     
1648     // We need to create some kind of switch over callee. For now we only do this if we believe that
1649     // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
1650     // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
1651     // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
1652     // we could improve that aspect by doing polymorphic inlining while still collecting the
1653     // polyvariant profiling.
1654     if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()
1655         || InlineCallFrame::isVarargs(kind)) {
1656         if (verbose) {
1657             dataLog("Bailing inlining (hard).\n");
1658             dataLog("Stack: ", currentCodeOrigin(), "\n");
1659         }
1660         return false;
1661     }
1662     
1663     unsigned oldOffset = m_currentIndex;
1664     
1665     bool allAreClosureCalls = true;
1666     bool allAreDirectCalls = true;
1667     for (unsigned i = callLinkStatus.size(); i--;) {
1668         if (callLinkStatus[i].isClosureCall())
1669             allAreDirectCalls = false;
1670         else
1671             allAreClosureCalls = false;
1672     }
1673     
1674     Node* thingToSwitchOn;
1675     if (allAreDirectCalls)
1676         thingToSwitchOn = callTargetNode;
1677     else if (allAreClosureCalls)
1678         thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
1679     else {
1680         // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
1681         // where it would be beneficial. It might be best to handle these cases as if all calls were
1682         // closure calls.
1683         // https://bugs.webkit.org/show_bug.cgi?id=136020
1684         if (verbose) {
1685             dataLog("Bailing inlining (mix).\n");
1686             dataLog("Stack: ", currentCodeOrigin(), "\n");
1687         }
1688         return false;
1689     }
1690     
1691     if (verbose) {
1692         dataLog("Doing hard inlining...\n");
1693         dataLog("Stack: ", currentCodeOrigin(), "\n");
1694     }
1695     
1696     int registerOffset = registerOffsetOrFirstFreeReg;
1697     
1698     // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
1699     // store the callee so that it will be accessible to all of the blocks we're about to create. We
1700     // get away with doing an immediate-set here because we wouldn't have performed any side effects
1701     // yet.
1702     if (verbose)
1703         dataLog("Register offset: ", registerOffset, "\n");
1704     VirtualRegister calleeReg(registerOffset + JSStack::Callee);
1705     calleeReg = m_inlineStackTop->remapOperand(calleeReg);
1706     if (verbose)
1707         dataLog("Callee is going to be ", calleeReg, "\n");
1708     setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
1709     
1710     SwitchData& data = *m_graph.m_switchData.add();
1711     data.kind = SwitchCell;
1712     addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
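         // The Switch dispatches on the callee: the callee cell itself when every variant is a
         // direct call, or its executable (via GetExecutable) when every variant is a closure
         // call. One case is appended below per successfully inlined callee, and the fallthrough
         // becomes the slow path block.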
1713     
1714     BasicBlock* originBlock = m_currentBlock;
1715     if (verbose)
1716         dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
1717     originBlock->didLink();
1718     cancelLinkingForBlock(m_inlineStackTop, originBlock);
1719     
1720     // Each inlined callee will have a landing block that it returns at. They should all have jumps
1721     // to the continuation block, which we create last.
1722     Vector<BasicBlock*> landingBlocks;
1723     
1724     // We may force this true if we give up on inlining any of the edges.
1725     bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
1726     
1727     if (verbose)
1728         dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
1729     
1730     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
1731         m_currentIndex = oldOffset;
1732         RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1733         m_currentBlock = block.get();
1734         m_graph.appendBlock(block);
1735         prepareToParseBlock();
1736         
1737         Node* myCallTargetNode = getDirect(calleeReg);
1738         
1739         bool inliningResult = attemptToInlineCall(
1740             myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
1741             argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
1742             inliningBalance, [&] (CodeBlock*) { });
1743         
1744         if (!inliningResult) {
1745             // That failed so we let the block die. Nothing interesting should have been added to
1746             // the block. We also give up on inlining any of the (less frequent) callees.
1747             ASSERT(m_currentBlock == block.get());
1748             ASSERT(m_graph.m_blocks.last() == block);
1749             m_graph.killBlockAndItsContents(block.get());
1750             m_graph.m_blocks.removeLast();
1751             
1752             // The fact that inlining failed means we need a slow path.
1753             couldTakeSlowPath = true;
1754             break;
1755         }
1756         
1757         JSCell* thingToCaseOn;
1758         if (allAreDirectCalls)
1759             thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
1760         else {
1761             ASSERT(allAreClosureCalls);
1762             thingToCaseOn = callLinkStatus[i].executable();
1763         }
1764         data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
1765         m_currentIndex = nextOffset;
1766         processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
1767         addToGraph(Jump);
1768         if (verbose)
1769             dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
1770         m_currentBlock->didLink();
1771         landingBlocks.append(m_currentBlock);
1772
1773         if (verbose)
1774             dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
1775     }
1776     
1777     RefPtr<BasicBlock> slowPathBlock = adoptRef(
1778         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1779     m_currentIndex = oldOffset;
1780     data.fallThrough = BranchTarget(slowPathBlock.get());
1781     m_graph.appendBlock(slowPathBlock);
1782     if (verbose)
1783         dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
1784     slowPathBlock->didLink();
1785     prepareToParseBlock();
1786     m_currentBlock = slowPathBlock.get();
1787     Node* myCallTargetNode = getDirect(calleeReg);
1788     if (couldTakeSlowPath) {
1789         addCall(
1790             resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
1791             registerOffset, prediction);
1792     } else {
1793         addToGraph(CheckBadCell);
1794         addToGraph(Phantom, myCallTargetNode);
1795         emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
1796         
1797         set(VirtualRegister(resultOperand), addToGraph(BottomValue));
1798     }
1799
1800     m_currentIndex = nextOffset;
1801     processSetLocalQueue();
1802     addToGraph(Jump);
1803     landingBlocks.append(m_currentBlock);
1804     
1805     RefPtr<BasicBlock> continuationBlock = adoptRef(
1806         new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
1807     m_graph.appendBlock(continuationBlock);
1808     if (verbose)
1809         dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
1810     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
1811     prepareToParseBlock();
1812     m_currentBlock = continuationBlock.get();
1813     
1814     for (unsigned i = landingBlocks.size(); i--;)
1815         landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();
1816     
1817     m_currentIndex = oldOffset;
1818     
1819     if (verbose) {
1820         dataLog("Done inlining (hard).\n");
1821         dataLog("Stack: ", currentCodeOrigin(), "\n");
1822     }
1823     return true;
1824 }
1825
1826 template<typename ChecksFunctor>
1827 bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
1828 {
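         // In summary: no user arguments folds to NaN, a single argument is passed through with a
         // Number use check, and two arguments become an ArithMin/ArithMax node; longer argument
         // lists fall back to a real call.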
1829     if (argumentCountIncludingThis == 1) { // Math.min()
1830         insertChecks();
1831         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1832         return true;
1833     }
1834      
1835     if (argumentCountIncludingThis == 2) { // Math.min(x)
1836         insertChecks();
1837         Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
1838         addToGraph(Phantom, Edge(result, NumberUse));
1839         set(VirtualRegister(resultOperand), result);
1840         return true;
1841     }
1842     
1843     if (argumentCountIncludingThis == 3) { // Math.min(x, y)
1844         insertChecks();
1845         set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
1846         return true;
1847     }
1848     
1849     // Don't handle >=3 arguments for now.
1850     return false;
1851 }
1852
1853 template<typename ChecksFunctor>
1854 bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
1855 {
1856     switch (intrinsic) {
1857     case AbsIntrinsic: {
1858         if (argumentCountIncludingThis == 1) { // Math.abs()
1859             insertChecks();
1860             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1861             return true;
1862         }
1863
1864         if (!MacroAssembler::supportsFloatingPointAbs())
1865             return false;
1866
1867         insertChecks();
1868         Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
1869         if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
1870             node->mergeFlags(NodeMayOverflowInDFG);
1871         set(VirtualRegister(resultOperand), node);
1872         return true;
1873     }
1874
1875     case MinIntrinsic:
1876         return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);
1877         
1878     case MaxIntrinsic:
1879         return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);
1880
1881     case SqrtIntrinsic:
1882     case CosIntrinsic:
1883     case SinIntrinsic:
1884     case LogIntrinsic: {
1885         if (argumentCountIncludingThis == 1) {
1886             insertChecks();
1887             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1888             return true;
1889         }
1890         
1891         switch (intrinsic) {
1892         case SqrtIntrinsic:
1893             insertChecks();
1894             set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
1895             return true;
1896             
1897         case CosIntrinsic:
1898             insertChecks();
1899             set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
1900             return true;
1901             
1902         case SinIntrinsic:
1903             insertChecks();
1904             set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
1905             return true;
1906
1907         case LogIntrinsic:
1908             insertChecks();
1909             set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
1910             return true;
1911             
1912         default:
1913             RELEASE_ASSERT_NOT_REACHED();
1914             return false;
1915         }
1916     }
1917
1918     case PowIntrinsic: {
1919         if (argumentCountIncludingThis < 3) {
1920             // Math.pow() and Math.pow(x) return NaN.
1921             insertChecks();
1922             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
1923             return true;
1924         }
1925         insertChecks();
1926         VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
1927         VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
1928         set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
1929         return true;
1930     }
1931         
1932     case ArrayPushIntrinsic: {
1933         if (argumentCountIncludingThis != 2)
1934             return false;
1935         
1936         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1937         if (!arrayMode.isJSArray())
1938             return false;
1939         switch (arrayMode.type()) {
1940         case Array::Undecided:
1941         case Array::Int32:
1942         case Array::Double:
1943         case Array::Contiguous:
1944         case Array::ArrayStorage: {
1945             insertChecks();
1946             Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
1947             set(VirtualRegister(resultOperand), arrayPush);
1948             
1949             return true;
1950         }
1951             
1952         default:
1953             return false;
1954         }
1955     }
1956         
1957     case ArrayPopIntrinsic: {
1958         if (argumentCountIncludingThis != 1)
1959             return false;
1960         
1961         ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
1962         if (!arrayMode.isJSArray())
1963             return false;
1964         switch (arrayMode.type()) {
1965         case Array::Int32:
1966         case Array::Double:
1967         case Array::Contiguous:
1968         case Array::ArrayStorage: {
1969             insertChecks();
1970             Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
1971             set(VirtualRegister(resultOperand), arrayPop);
1972             return true;
1973         }
1974             
1975         default:
1976             return false;
1977         }
1978     }
1979
1980     case CharCodeAtIntrinsic: {
1981         if (argumentCountIncludingThis != 2)
1982             return false;
1983
1984         insertChecks();
1985         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1986         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
1987         Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
1988
1989         set(VirtualRegister(resultOperand), charCode);
1990         return true;
1991     }
1992
1993     case CharAtIntrinsic: {
1994         if (argumentCountIncludingThis != 2)
1995             return false;
1996
1997         insertChecks();
1998         VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
1999         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2000         Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));
2001
2002         set(VirtualRegister(resultOperand), charCode);
2003         return true;
2004     }
2005     case Clz32Intrinsic: {
2006         insertChecks();
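             // Math.clz32() with no argument coerces undefined to 0, and clz32(0) is 32, so the
             // zero-argument form folds to the constant 32.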
2007         if (argumentCountIncludingThis == 1)
2008             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
2009         else {
2010             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2011             set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
2012         }
2013         return true;
2014     }
2015     case FromCharCodeIntrinsic: {
2016         if (argumentCountIncludingThis != 2)
2017             return false;
2018
2019         insertChecks();
2020         VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
2021         Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));
2022
2023         set(VirtualRegister(resultOperand), charCode);
2024
2025         return true;
2026     }
2027
2028     case RegExpExecIntrinsic: {
2029         if (argumentCountIncludingThis != 2)
2030             return false;
2031         
2032         insertChecks();
2033         Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2034         set(VirtualRegister(resultOperand), regExpExec);
2035         
2036         return true;
2037     }
2038         
2039     case RegExpTestIntrinsic: {
2040         if (argumentCountIncludingThis != 2)
2041             return false;
2042         
2043         insertChecks();
2044         Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
2045         set(VirtualRegister(resultOperand), regExpExec);
2046         
2047         return true;
2048     }
2049     case RoundIntrinsic: {
2050         if (argumentCountIncludingThis == 1) {
2051             insertChecks();
2052             set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
2053             return true;
2054         }
2055         if (argumentCountIncludingThis == 2) {
2056             insertChecks();
2057             Node* operand = get(virtualRegisterForArgument(1, registerOffset));
2058             Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand);
2059             set(VirtualRegister(resultOperand), roundNode);
2060             return true;
2061         }
2062         return false;
2063     }
2064     case IMulIntrinsic: {
2065         if (argumentCountIncludingThis != 3)
2066             return false;
2067         insertChecks();
2068         VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
2069         VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
2070         Node* left = get(leftOperand);
2071         Node* right = get(rightOperand);
2072         set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
2073         return true;
2074     }
2075         
2076     case FRoundIntrinsic: {
2077         if (argumentCountIncludingThis != 2)
2078             return false;
2079         insertChecks();
2080         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
2081         set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
2082         return true;
2083     }
2084         
2085     case DFGTrueIntrinsic: {
2086         insertChecks();
2087         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2088         return true;
2089     }
2090         
2091     case OSRExitIntrinsic: {
2092         insertChecks();
2093         addToGraph(ForceOSRExit);
2094         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2095         return true;
2096     }
2097         
2098     case IsFinalTierIntrinsic: {
2099         insertChecks();
2100         set(VirtualRegister(resultOperand),
2101             jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
2102         return true;
2103     }
2104         
2105     case SetInt32HeapPredictionIntrinsic: {
2106         insertChecks();
2107         for (int i = 1; i < argumentCountIncludingThis; ++i) {
2108             Node* node = get(virtualRegisterForArgument(i, registerOffset));
2109             if (node->hasHeapPrediction())
2110                 node->setHeapPrediction(SpecInt32);
2111         }
2112         set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
2113         return true;
2114     }
2115         
2116     case CheckInt32Intrinsic: {
2117         insertChecks();
2118         for (int i = 1; i < argumentCountIncludingThis; ++i) {
2119             Node* node = get(virtualRegisterForArgument(i, registerOffset));
2120             addToGraph(Phantom, Edge(node, Int32Use));
2121         }
2122         set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
2123         return true;
2124     }
2125         
2126     case FiatInt52Intrinsic: {
2127         if (argumentCountIncludingThis != 2)
2128             return false;
2129         insertChecks();
2130         VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
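             // With Int52 support enabled this becomes a FiatInt52 node, which asks the compiler
             // to treat the argument as an Int52 value; without Int52 support the intrinsic simply
             // forwards the argument unchanged.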
2131         if (enableInt52())
2132             set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
2133         else
2134             set(VirtualRegister(resultOperand), get(operand));
2135         return true;
2136     }
2137         
2138     default:
2139         return false;
2140     }
2141 }
2142
2143 template<typename ChecksFunctor>
2144 bool ByteCodeParser::handleTypedArrayConstructor(
2145     int resultOperand, InternalFunction* function, int registerOffset,
2146     int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
2147 {
2148     if (!isTypedView(type))
2149         return false;
2150     
2151     if (function->classInfo() != constructorClassInfoForType(type))
2152         return false;
2153     
2154     if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2155         return false;
2156     
2157     // We only have an intrinsic for the case where you say:
2158     //
2159     // new FooArray(blah);
2160     //
2161     // Of course, 'blah' could be any of the following:
2162     //
2163     // - Integer, indicating that you want to allocate an array of that length.
2164     //   This is the thing we're hoping for, and what we can actually do meaningful
2165     //   optimizations for.
2166     //
2167     // - Array buffer, indicating that you want to create a view onto that _entire_
2168     //   buffer.
2169     //
2170     // - Non-buffer object, indicating that you want to create a copy of that
2171     //   object by pretending that it quacks like an array.
2172     //
2173     // - Anything else, indicating that you want to have an exception thrown at
2174     //   you.
2175     //
2176     // The intrinsic, NewTypedArray, will behave as if it could do any of these
2177     // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
2178     // predicted Int32, then we lock it in as a normal typed array allocation.
2179     // Otherwise, NewTypedArray turns into a totally opaque function call that
2180     // may clobber the world - by virtue of it accessing properties on what could
2181     // be an object.
2182     //
2183     // Note that although the generic form of NewTypedArray sounds sort of awful,
2184     // it is actually quite likely to be more efficient than a fully generic
2185     // Construct. So, we might want to think about making NewTypedArray variadic,
2186     // or else making Construct not super slow.
2187     
2188     if (argumentCountIncludingThis != 2)
2189         return false;
2190
2191     insertChecks();
2192     set(VirtualRegister(resultOperand),
2193         addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
2194     return true;
2195 }
2196
2197 template<typename ChecksFunctor>
2198 bool ByteCodeParser::handleConstantInternalFunction(
2199     int resultOperand, InternalFunction* function, int registerOffset,
2200     int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
2201 {
2202     if (verbose)
2203         dataLog("    Handling constant internal function ", JSValue(function), "\n");
2204     
2205     // If we ever find that we have a lot of internal functions that we specialize for,
2206     // then we should probably have some sort of hashtable dispatch, or maybe even
2207     // dispatch straight through the MethodTable of the InternalFunction. But for now,
2208     // it seems that this case is hit infrequently enough, and the number of functions
2209     // we know about is small enough, that having just a linear cascade of if statements
2210     // is good enough.
2211     
2212     if (function->classInfo() == ArrayConstructor::info()) {
2213         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
2214             return false;
2215         
2216         insertChecks();
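             // 'new Array(n)' (a single argument) becomes NewArrayWithSize; any other arity builds
             // the result from the arguments themselves via a varargs NewArray node.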
2217         if (argumentCountIncludingThis == 2) {
2218             set(VirtualRegister(resultOperand),
2219                 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
2220             return true;
2221         }
2222         
2223         // FIXME: Array constructor should use "this" as newTarget.
2224         for (int i = 1; i < argumentCountIncludingThis; ++i)
2225             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
2226         set(VirtualRegister(resultOperand),
2227             addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
2228         return true;
2229     }
2230     
2231     if (function->classInfo() == StringConstructor::info()) {
2232         insertChecks();
2233         
2234         Node* result;
2235         
2236         if (argumentCountIncludingThis <= 1)
2237             result = jsConstant(m_vm->smallStrings.emptyString());
2238         else
2239             result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));
2240         
2241         if (kind == CodeForConstruct)
2242             result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
2243         
2244         set(VirtualRegister(resultOperand), result);
2245         return true;
2246     }
2247     
2248     for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
2249         bool result = handleTypedArrayConstructor(
2250             resultOperand, function, registerOffset, argumentCountIncludingThis,
2251             indexToTypedArrayType(typeIndex), insertChecks);
2252         if (result)
2253             return true;
2254     }
2255     
2256     return false;
2257 }
2258
2259 Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
2260 {
2261     if (base->hasConstant()) {
2262         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
2263             addToGraph(Phantom, base);
2264             return weakJSConstant(constant);
2265         }
2266     }
2267     
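         // Inline property offsets live directly in the object cell, so the base itself acts as
         // the property storage; out-of-line offsets live in the butterfly, which we load with
         // GetButterfly.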
2268     Node* propertyStorage;
2269     if (isInlineOffset(offset))
2270         propertyStorage = base;
2271     else
2272         propertyStorage = addToGraph(GetButterfly, base);
2273     
2274     StorageAccessData* data = m_graph.m_storageAccessData.add();
2275     data->offset = offset;
2276     data->identifierNumber = identifierNumber;
2277     
2278     Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);
2279
2280     return getByOffset;
2281 }
2282
2283 Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
2284 {
2285     Node* propertyStorage;
2286     if (isInlineOffset(offset))
2287         propertyStorage = base;
2288     else
2289         propertyStorage = addToGraph(GetButterfly, base);
2290     
2291     StorageAccessData* data = m_graph.m_storageAccessData.add();
2292     data->offset = offset;
2293     data->identifierNumber = identifier;
2294     
2295     Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);
2296     
2297     return result;
2298 }
2299
2300 void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
2301 {
2302     for (unsigned i = 0; i < vector.size(); ++i)
2303         cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
2304 }
2305
2306 void ByteCodeParser::handleGetById(
2307     int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
2308     const GetByIdStatus& getByIdStatus)
2309 {
2310     NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
2311     
2312     if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2313         set(VirtualRegister(destinationOperand),
2314             addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2315         return;
2316     }
2317     
2318     if (getByIdStatus.numVariants() > 1) {
2319         if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
2320             || !Options::enablePolymorphicAccessInlining()) {
2321             set(VirtualRegister(destinationOperand),
2322                 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
2323             return;
2324         }
2325         
2326         if (m_graph.compilation())
2327             m_graph.compilation()->noticeInlinedGetById();
2328     
2329         // 1) Emit prototype structure checks for all chains. This may not be optimal if there
2330         //    is some rarely executed case in the chain that requires a lot of checks and those
2331         //    checks are not watchpointable.
2332         for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
2333             emitChecks(getByIdStatus[variantIndex].constantChecks());
2334         
2335         // 2) Emit a MultiGetByOffset
2336         MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
2337         data->variants = getByIdStatus.variants();
2338         data->identifierNumber = identifierNumber;
2339         set(VirtualRegister(destinationOperand),
2340             addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
2341         return;
2342     }
2343     
2344     ASSERT(getByIdStatus.numVariants() == 1);
2345     GetByIdVariant variant = getByIdStatus[0];
2346                 
2347     if (m_graph.compilation())
2348         m_graph.compilation()->noticeInlinedGetById();
2349     
2350     Node* originalBase = base;
2351                 
2352     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
2353     
2354     emitChecks(variant.constantChecks());
2355
2356     if (variant.alternateBase())
2357         base = weakJSConstant(variant.alternateBase());
2358     
2359     // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
2360     // ensure that the base of the original get_by_id is kept alive until we're done with
2361     // all of the speculations. We only insert the Phantom if there had been a CheckStructure
2362     // on something other than the base following the CheckStructure on base.
2363     if (originalBase != base)
2364         addToGraph(Phantom, originalBase);
2365     
2366     Node* loadedValue = handleGetByOffset(
2367         variant.callLinkStatus() ? SpecCellOther : prediction,
2368         base, variant.baseStructure(), identifierNumber, variant.offset(),
2369         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
2370     
2371     if (!variant.callLinkStatus()) {
2372         set(VirtualRegister(destinationOperand), loadedValue);
2373         return;
2374     }
2375     
2376     Node* getter = addToGraph(GetGetter, loadedValue);
2377     
2378     // Make a call. We don't try to get fancy with using the smallest operand number because
2379     // the stack layout phase should compress the stack anyway.
2380     
2381     unsigned numberOfParameters = 0;
2382     numberOfParameters++; // The 'this' argument.
2383     numberOfParameters++; // True return PC.
2384     
2385     // Start with a register offset that corresponds to the last in-use register.
2386     int registerOffset = virtualRegisterForLocal(
2387         m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2388     registerOffset -= numberOfParameters;
2389     registerOffset -= JSStack::CallFrameHeaderSize;
2390     
2391     // Get the alignment right.
2392     registerOffset = -WTF::roundUpToMultipleOf(
2393         stackAlignmentRegisters(),
2394         -registerOffset);
2395     
2396     ensureLocals(
2397         m_inlineStackTop->remapOperand(
2398             VirtualRegister(registerOffset)).toLocal());
2399     
2400     // Issue SetLocals. This has two effects:
2401     // 1) That's how handleCall() sees the arguments.
2402     // 2) If we inline then this ensures that the arguments are flushed so that if you use
2403     //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
2404     //    since we only really care about 'this' in this case. But we're not going to take that
2405     //    shortcut.
2406     int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2407     set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2408     
2409     handleCall(
2410         destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
2411         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
2412 }
2413
2414 void ByteCodeParser::emitPutById(
2415     Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
2416 {
2417     if (isDirect)
2418         addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
2419     else
2420         addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
2421 }
2422
2423 void ByteCodeParser::handlePutById(
2424     Node* base, unsigned identifierNumber, Node* value,
2425     const PutByIdStatus& putByIdStatus, bool isDirect)
2426 {
2427     if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
2428         if (!putByIdStatus.isSet())
2429             addToGraph(ForceOSRExit);
2430         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2431         return;
2432     }
2433     
2434     if (putByIdStatus.numVariants() > 1) {
2435         if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
2436             || !Options::enablePolymorphicAccessInlining()) {
2437             emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2438             return;
2439         }
2440         
2441         if (m_graph.compilation())
2442             m_graph.compilation()->noticeInlinedPutById();
2443         
2444         if (!isDirect) {
2445             for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
2446                 if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
2447                     continue;
2448                 emitChecks(putByIdStatus[variantIndex].constantChecks());
2449             }
2450         }
2451         
2452         MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
2453         data->variants = putByIdStatus.variants();
2454         data->identifierNumber = identifierNumber;
2455         addToGraph(MultiPutByOffset, OpInfo(data), base, value);
2456         return;
2457     }
2458     
2459     ASSERT(putByIdStatus.numVariants() == 1);
2460     const PutByIdVariant& variant = putByIdStatus[0];
2461     
2462     switch (variant.kind()) {
2463     case PutByIdVariant::Replace: {
2464         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2465         handlePutByOffset(base, identifierNumber, variant.offset(), value);
2466         if (m_graph.compilation())
2467             m_graph.compilation()->noticeInlinedPutById();
2468         return;
2469     }
2470     
2471     case PutByIdVariant::Transition: {
2472         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
2473         emitChecks(variant.constantChecks());
2474
2475         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
2476     
2477         Node* propertyStorage;
2478         Transition* transition = m_graph.m_transitions.add(
2479             variant.oldStructureForTransition(), variant.newStructure());
2480
2481         if (variant.reallocatesStorage()) {
2482
2483             // If we're growing the property storage then it must be because we're
2484             // storing into the out-of-line storage.
2485             ASSERT(!isInlineOffset(variant.offset()));
2486
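                 // Adding the first out-of-line property: allocate fresh storage. Otherwise we
                 // grow the existing butterfly with a reallocation.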
2487             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
2488                 propertyStorage = addToGraph(
2489                     AllocatePropertyStorage, OpInfo(transition), base);
2490             } else {
2491                 propertyStorage = addToGraph(
2492                     ReallocatePropertyStorage, OpInfo(transition),
2493                     base, addToGraph(GetButterfly, base));
2494             }
2495         } else {
2496             if (isInlineOffset(variant.offset()))
2497                 propertyStorage = base;
2498             else
2499                 propertyStorage = addToGraph(GetButterfly, base);
2500         }
2501
2502         StorageAccessData* data = m_graph.m_storageAccessData.add();
2503         data->offset = variant.offset();
2504         data->identifierNumber = identifierNumber;
2505         
2506         addToGraph(
2507             PutByOffset,
2508             OpInfo(data),
2509             propertyStorage,
2510             base,
2511             value);
2512
2513         // FIXME: PutStructure goes last until we fix either
2514         // https://bugs.webkit.org/show_bug.cgi?id=142921 or
2515         // https://bugs.webkit.org/show_bug.cgi?id=142924.
2516         addToGraph(PutStructure, OpInfo(transition), base);
2517
2518         if (m_graph.compilation())
2519             m_graph.compilation()->noticeInlinedPutById();
2520         return;
2521     }
2522         
2523     case PutByIdVariant::Setter: {
2524         Node* originalBase = base;
2525         
2526         addToGraph(
2527             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
2528         
2529         emitChecks(variant.constantChecks());
2530         
2531         if (variant.alternateBase())
2532             base = weakJSConstant(variant.alternateBase());
2533         
2534         Node* loadedValue = handleGetByOffset(
2535             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
2536             GetGetterSetterByOffset);
2537         
2538         Node* setter = addToGraph(GetSetter, loadedValue);
2539         
2540         // Make a call. We don't try to be clever about using the smallest operand numbers
2541         // because the stack layout phase should compress the stack anyway.
2542     
2543         unsigned numberOfParameters = 0;
2544         numberOfParameters++; // The 'this' argument.
2545         numberOfParameters++; // The new value.
2546         numberOfParameters++; // True return PC.
2547     
2548         // Start with a register offset that corresponds to the last in-use register.
2549         int registerOffset = virtualRegisterForLocal(
2550             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
2551         registerOffset -= numberOfParameters;
2552         registerOffset -= JSStack::CallFrameHeaderSize;
2553     
2554         // Get the alignment right.
2555         registerOffset = -WTF::roundUpToMultipleOf(
2556             stackAlignmentRegisters(),
2557             -registerOffset);
2558     
2559         ensureLocals(
2560             m_inlineStackTop->remapOperand(
2561                 VirtualRegister(registerOffset)).toLocal());
2562     
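         // Lay out the arguments for the setter call: the first argument slot past the call
         // frame header gets the original base as 'this', and the next slot gets the value
         // being stored.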
2563         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
2564         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
2565         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
2566     
2567         handleCall(
2568             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
2569             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
2570             *variant.callLinkStatus(), SpecOther);
2571         return;
2572     }
2573     
2574     default: {
2575         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
2576         return;
2577     } }
2578 }
2579
2580 void ByteCodeParser::prepareToParseBlock()
2581 {
2582     clearCaches();
2583     ASSERT(m_setLocalQueue.isEmpty());
2584 }
2585
2586 void ByteCodeParser::clearCaches()
2587 {
2588     m_constants.resize(0);
2589 }
2590
2591 bool ByteCodeParser::parseBlock(unsigned limit)
2592 {
2593     bool shouldContinueParsing = true;
2594
2595     Interpreter* interpreter = m_vm->interpreter;
2596     Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
2597     unsigned blockBegin = m_currentIndex;
2598     
2599     // If we are the first basic block, introduce markers for the arguments. This lets us
2600     // track whether a use of an argument may observe the value actually passed by the
2601     // caller, as opposed to a value we set explicitly.
2602     if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
2603         m_graph.m_arguments.resize(m_numArguments);
2604         for (unsigned argument = 0; argument < m_numArguments; ++argument) {
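             // Seed each argument's variable with any recorded BadCache / BadIndexingType exits
             // so the hoisting phases won't re-hoist checks that have already caused OSR exits.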
2605             VariableAccessData* variable = newVariableAccessData(
2606                 virtualRegisterForArgument(argument));
2607             variable->mergeStructureCheckHoistingFailed(
2608                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
2609             variable->mergeCheckArrayHoistingFailed(
2610                 m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
2611             
2612             Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
2613             m_graph.m_arguments[argument] = setArgument;
2614             m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
2615         }
2616     }
2617
2618     while (true) {
2619         processSetLocalQueue();
2620         
2621         // Don't extend over jump destinations.
2622         if (m_currentIndex == limit) {
2623             // Ordinarily we want to plant a jump here. But we refuse to do so if the block
2624             // is empty. This is a special case for inlining, which can otherwise leave
2625             // behind empty blocks. When parseBlock() returns with an empty block, that
2626             // block gets repurposed instead of a new one being created. Note that this
2627             // logic relies on every bytecode resulting in at least one node; that would
2628             // be true anyway except for op_loop_hint, which emits a Phantom just to keep
2629             // this invariant.
2630             if (!m_currentBlock->isEmpty())
2631                 addToGraph(Jump, OpInfo(m_currentIndex));
2632             return shouldContinueParsing;
2633         }
2634         
2635         // Switch on the current bytecode opcode.
2636         Instruction* currentInstruction = instructionsBegin + m_currentIndex;
2637         m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
2638         OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
2639         
2640         if (Options::verboseDFGByteCodeParsing())
2641             dataLog("    parsing ", currentCodeOrigin(), "\n");
2642         
2643         if (m_graph.compilation()) {
2644             addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
2645                 Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
2646         }
2647         
2648         switch (opcodeID) {
2649
2650         // === Function entry opcodes ===
2651
2652         case op_enter: {
2653             Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
2654             // Initialize all locals to undefined.
2655             for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
2656                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
2657             NEXT_OPCODE(op_enter);
2658         }
2659             
2660         case op_to_this: {
2661             Node* op1 = getThis();
2662             if (op1->op() != ToThis) {
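                 // If profiling says to_this has always been the identity on a single cached
                 // structure (default toThis behavior, no slow cases, no bad-cache exits, no
                 // failed hoisting), a CheckStructure on op1 suffices; otherwise emit ToThis.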
2663                 Structure* cachedStructure = currentInstruction[2].u.structure.get();
2664                 if (currentInstruction[2].u.toThisStatus != ToThisOK
2665                     || !cachedStructure
2666                     || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
2667                     || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
2668                     || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
2669                     || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
2670                     setThis(addToGraph(ToThis, op1));
2671                 } else {
2672                     addToGraph(
2673                         CheckStructure,
2674                         OpInfo(m_graph.addStructureSet(cachedStructure)),
2675                         op1);
2676                 }
2677             }
2678             NEXT_OPCODE(op_to_this);
2679         }
2680
2681         case op_create_this: {
2682             int calleeOperand = currentInstruction[2].u.operand;
2683             Node* callee = get(VirtualRegister(calleeOperand));
2684
2685             JSFunction* function = callee->dynamicCastConstant<JSFunction*>();
2686             if (!function) {
2687                 JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet();
2688                 if (cachedFunction
2689                     && cachedFunction != JSCell::seenMultipleCalleeObjects()
2690                     && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
2691                     ASSERT(cachedFunction->inherits(JSFunction::info()));
2692
2693                     FrozenValue* frozen = m_graph.freeze(cachedFunction);
2694                     addToGraph(CheckCell, OpInfo(frozen), callee);
2695                     set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
2696
2697                     function = static_cast<JSFunction*>(cachedFunction);
2698                 }
2699             }
2700
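             // If we know the callee and its allocation profile already has a structure, we can
             // allocate the object directly with NewObject, guarded by a watchpoint on the
             // allocation profile, instead of emitting a generic CreateThis.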
2701             bool alreadyEmitted = false;
2702             if (function) {
2703                 if (FunctionRareData* rareData = function->rareData()) {
2704                     if (Structure* structure = rareData->allocationStructure()) {
2705                         m_graph.freeze(rareData);
2706                         m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
2707                         // The callee is still live up to this point.
2708                         addToGraph(Phantom, callee);
2709                         set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
2710                         alreadyEmitted = true;
2711                     }
2712                 }
2713             }
2714             if (!alreadyEmitted) {
2715                 set(VirtualRegister(currentInstruction[1].u.operand),
2716                     addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
2717             }
2718             NEXT_OPCODE(op_create_this);
2719         }
2720
2721         case op_new_object: {
2722             set(VirtualRegister(currentInstruction[1].u.operand),
2723                 addToGraph(NewObject,
2724                     OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
2725             NEXT_OPCODE(op_new_object);
2726         }
2727             
2728         case op_new_array: {
2729             int startOperand = currentInstruction[2].u.operand;
2730             int numOperands = currentInstruction[3].u.operand;
2731             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2732             for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
2733                 addVarArgChild(get(VirtualRegister(operandIdx)));
2734             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
2735             NEXT_OPCODE(op_new_array);
2736         }
2737             
2738         case op_new_array_with_size: {
2739             int lengthOperand = currentInstruction[2].u.operand;
2740             ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
2741             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
2742             NEXT_OPCODE(op_new_array_with_size);
2743         }
2744             
2745         case op_new_array_buffer: {
2746             int startConstant = currentInstruction[2].u.operand;
2747             int numConstants = currentInstruction[3].u.operand;
2748             ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
2749             NewArrayBufferData data;
2750             data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
2751             data.numConstants = numConstants;
2752             data.indexingType = profile->selectIndexingType();
2753
2754             // If this statement has never executed, we'll have the wrong indexing type in the profile.
2755             for (int i = 0; i < numConstants; ++i) {
2756                 data.indexingType =
2757                     leastUpperBoundOfIndexingTypeAndValue(
2758                         data.indexingType,
2759                         m_codeBlock->constantBuffer(data.startConstant)[i]);
2760             }
2761             
2762             m_graph.m_newArrayBufferData.append(data);
2763             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
2764             NEXT_OPCODE(op_new_array_buffer);
2765         }
2766             
2767         case op_new_regexp: {
2768             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
2769             NEXT_OPCODE(op_new_regexp);
2770         }
2771             
2772         // === Bitwise operations ===
2773
2774         case op_bitand: {
2775             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2776             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2777             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
2778             NEXT_OPCODE(op_bitand);
2779         }
2780
2781         case op_bitor: {
2782             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2783             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2784             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
2785             NEXT_OPCODE(op_bitor);
2786         }
2787
2788         case op_bitxor: {
2789             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2790             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2791             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
2792             NEXT_OPCODE(op_bitxor);
2793         }
2794
2795         case op_rshift: {
2796             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2797             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2798             set(VirtualRegister(currentInstruction[1].u.operand),
2799                 addToGraph(BitRShift, op1, op2));
2800             NEXT_OPCODE(op_rshift);
2801         }
2802
2803         case op_lshift: {
2804             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2805             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2806             set(VirtualRegister(currentInstruction[1].u.operand),
2807                 addToGraph(BitLShift, op1, op2));
2808             NEXT_OPCODE(op_lshift);
2809         }
2810
2811         case op_urshift: {
2812             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2813             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2814             set(VirtualRegister(currentInstruction[1].u.operand),
2815                 addToGraph(BitURShift, op1, op2));
2816             NEXT_OPCODE(op_urshift);
2817         }
2818             
2819         case op_unsigned: {
2820             set(VirtualRegister(currentInstruction[1].u.operand),
2821                 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
2822             NEXT_OPCODE(op_unsigned);
2823         }
2824
2825         // === Increment/Decrement opcodes ===
2826
2827         case op_inc: {
2828             int srcDst = currentInstruction[1].u.operand;
2829             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2830             Node* op = get(srcDstVirtualRegister);
2831             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2832             NEXT_OPCODE(op_inc);
2833         }
2834
2835         case op_dec: {
2836             int srcDst = currentInstruction[1].u.operand;
2837             VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
2838             Node* op = get(srcDstVirtualRegister);
2839             set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
2840             NEXT_OPCODE(op_dec);
2841         }
2842
2843         // === Arithmetic operations ===
2844
2845         case op_add: {
2846             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2847             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2848             if (op1->hasNumberResult() && op2->hasNumberResult())
2849                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
2850             else
2851                 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
2852             NEXT_OPCODE(op_add);
2853         }
2854
2855         case op_sub: {
2856             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2857             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2858             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
2859             NEXT_OPCODE(op_sub);
2860         }
2861
2862         case op_negate: {
2863             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2864             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
2865             NEXT_OPCODE(op_negate);
2866         }
2867
2868         case op_mul: {
2869             // Multiply requires that the inputs are not truncated, unfortunately.
2870             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2871             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2872             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
2873             NEXT_OPCODE(op_mul);
2874         }
2875
2876         case op_mod: {
2877             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2878             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2879             set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
2880             NEXT_OPCODE(op_mod);
2881         }
2882
2883         case op_div: {
2884             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
2885             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
2886             set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
2887             NEXT_OPCODE(op_div);
2888         }
2889
2890         // === Misc operations ===
2891
2892         case op_debug:
2893             addToGraph(Breakpoint);
2894             NEXT_OPCODE(op_debug);
2895
2896         case op_profile_will_call: {
2897             addToGraph(ProfileWillCall);
2898             NEXT_OPCODE(op_profile_will_call);
2899         }
2900
2901         case op_profile_did_call: {
2902             addToGraph(ProfileDidCall);
2903             NEXT_OPCODE(op_profile_did_call);
2904         }
2905
2906         case op_mov: {
2907             Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
2908             set(VirtualRegister(currentInstruction[1].u.operand), op);
2909             NEXT_OPCODE(op_mov);
2910         }
2911
2912         case op_check_tdz: {
2913             Node* op = get(VirtualRegister(currentInstruction[1].u.operand));
2914             addToGraph(CheckNotEmpty, op);
2915             NEXT_OPCODE(op_check_tdz);
2916         }
2917
2918         case op_check_has_instance:
2919             addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
2920             NEXT_OPCODE(op_check_has_instance);
2921
2922         case op_instanceof: {
2923             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2924             Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
2925             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
2926             NEXT_OPCODE(op_instanceof);
2927         }
2928             
2929         case op_is_undefined: {
2930             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2931             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
2932             NEXT_OPCODE(op_is_undefined);
2933         }
2934
2935         case op_is_boolean: {
2936             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2937             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
2938             NEXT_OPCODE(op_is_boolean);
2939         }
2940
2941         case op_is_number: {
2942             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2943             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
2944             NEXT_OPCODE(op_is_number);
2945         }
2946
2947         case op_is_string: {
2948             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2949             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
2950             NEXT_OPCODE(op_is_string);
2951         }
2952
2953         case op_is_object: {
2954             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2955             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
2956             NEXT_OPCODE(op_is_object);
2957         }
2958
2959         case op_is_object_or_null: {
2960             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2961             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
2962             NEXT_OPCODE(op_is_object_or_null);
2963         }
2964
2965         case op_is_function: {
2966             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2967             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
2968             NEXT_OPCODE(op_is_function);
2969         }
2970
2971         case op_not: {
2972             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2973             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
2974             NEXT_OPCODE(op_not);
2975         }
2976             
2977         case op_to_primitive: {
2978             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
2979             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
2980             NEXT_OPCODE(op_to_primitive);
2981         }
2982             
2983         case op_strcat: {
2984             int startOperand = currentInstruction[2].u.operand;
2985             int numOperands = currentInstruction[3].u.operand;
2986 #if CPU(X86)
2987             // X86 doesn't have enough registers to compile MakeRope with three arguments.
2988             // Rather than try to be clever, we just make MakeRope dumber on this processor.
2989             const unsigned maxRopeArguments = 2;
2990 #else
2991             const unsigned maxRopeArguments = 3;
2992 #endif
2993             auto toStringNodes = std::make_unique<Node*[]>(numOperands);
2994             for (int i = 0; i < numOperands; i++)
2995                 toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));
2996
2997             for (int i = 0; i < numOperands; i++)
2998                 addToGraph(Phantom, toStringNodes[i]);
2999
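             // Combine the ToString'd operands into MakeRope nodes at most maxRopeArguments at
             // a time, carrying the partial rope forward in operands[0] whenever the buffer
             // fills up.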
3000             Node* operands[AdjacencyList::Size];
3001             unsigned indexInOperands = 0;
3002             for (unsigned i = 0; i < AdjacencyList::Size; ++i)
3003                 operands[i] = 0;
3004             for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
3005                 if (indexInOperands == maxRopeArguments) {
3006                     operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
3007                     for (unsigned i = 1; i < AdjacencyList::Size; ++i)
3008                         operands[i] = 0;
3009                     indexInOperands = 1;
3010                 }
3011                 
3012                 ASSERT(indexInOperands < AdjacencyList::Size);
3013                 ASSERT(indexInOperands < maxRopeArguments);
3014                 operands[indexInOperands++] = toStringNodes[operandIdx];
3015             }
3016             set(VirtualRegister(currentInstruction[1].u.operand),
3017                 addToGraph(MakeRope, operands[0], operands[1], operands[2]));
3018             NEXT_OPCODE(op_strcat);
3019         }
3020
3021         case op_less: {
3022             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3023             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3024             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
3025             NEXT_OPCODE(op_less);
3026         }
3027
3028         case op_lesseq: {
3029             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3030             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3031             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
3032             NEXT_OPCODE(op_lesseq);
3033         }
3034
3035         case op_greater: {
3036             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3037             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3038             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
3039             NEXT_OPCODE(op_greater);
3040         }
3041
3042         case op_greatereq: {
3043             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3044             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3045             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
3046             NEXT_OPCODE(op_greatereq);
3047         }
3048
3049         case op_eq: {
3050             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3051             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3052             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
3053             NEXT_OPCODE(op_eq);
3054         }
3055
3056         case op_eq_null: {
3057             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3058             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull))));
3059             NEXT_OPCODE(op_eq_null);
3060         }
3061
3062         case op_stricteq: {
3063             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3064             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3065             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
3066             NEXT_OPCODE(op_stricteq);
3067         }
3068
3069         case op_neq: {
3070             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3071             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3072             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
3073             NEXT_OPCODE(op_neq);
3074         }
3075
3076         case op_neq_null: {
3077             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3078             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)))));
3079             NEXT_OPCODE(op_neq_null);
3080         }
3081
3082         case op_nstricteq: {
3083             Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
3084             Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
3085             Node* invertedResult;
3086             invertedResult = addToGraph(CompareStrictEq, op1, op2);
3087             set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
3088             NEXT_OPCODE(op_nstricteq);
3089         }
3090
3091         // === Property access operations ===
3092
3093         case op_get_by_val: {
3094             SpeculatedType prediction = getPredictionWithoutOSRExit();
3095             
3096             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3097             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
3098             Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
3099             Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
3100             set(VirtualRegister(currentInstruction[1].u.operand), getByVal);
3101
3102             NEXT_OPCODE(op_get_by_val);
3103         }
3104
3105         case op_put_by_val_direct:
3106         case op_put_by_val: {
3107             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
3108
3109             ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
3110             
3111             Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
3112             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
3113             
3114             addVarArgChild(base);
3115             addVarArgChild(property);
3116             addVarArgChild(value);
3117             addVarArgChild(0); // Leave room for property storage.
3118             addVarArgChild(0); // Leave room for length.
3119             addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));
3120
3121             NEXT_OPCODE(op_put_by_val);
3122         }
3123             
3124         case op_get_by_id:
3125         case op_get_by_id_out_of_line:
3126         case op_get_array_length: {
3127             SpeculatedType prediction = getPrediction();
3128             
3129             Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
3130             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3131             
3132             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
3133             GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
3134                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
3135                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
3136                 currentCodeOrigin(), uid);
3137             
3138             handleGetById(
3139                 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);
3140
3141             NEXT_OPCODE(op_get_by_id);
3142         }
3143         case op_put_by_id:
3144         case op_put_by_id_out_of_line:
3145         case op_put_by_id_transition_direct:
3146         case op_put_by_id_transition_normal:
3147         case op_put_by_id_transition_direct_out_of_line:
3148         case op_put_by_id_transition_normal_out_of_line: {
3149             Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
3150             Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
3151             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3152             bool direct = currentInstruction[8].u.operand;
3153
3154             PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
3155                 m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
3156                 m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
3157                 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
3158             
3159             handlePutById(base, identifierNumber, value, putByIdStatus, direct);
3160             NEXT_OPCODE(op_put_by_id);
3161         }
3162
3163         case op_init_global_const_nop: {
3164             NEXT_OPCODE(op_init_global_const_nop);
3165         }
3166
3167         case op_init_global_const: {
3168             Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
3169             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3170             addToGraph(
3171                 PutGlobalVar,
3172                 OpInfo(globalObject->assertVariableIsInThisObject(currentInstruction[1].u.variablePointer)),
3173                 weakJSConstant(globalObject), value);
3174             NEXT_OPCODE(op_init_global_const);
3175         }
3176
3177         case op_profile_type: {
3178             Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
3179             addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
3180             NEXT_OPCODE(op_profile_type);
3181         }
3182
3183         case op_profile_control_flow: {
3184             BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
3185             addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
3186             NEXT_OPCODE(op_profile_control_flow);
3187         }
3188
3189         // === Block terminators. ===
3190
3191         case op_jmp: {
3192             int relativeOffset = currentInstruction[1].u.operand;
3193             addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
3194             if (relativeOffset <= 0)
3195                 flushForTerminal();
3196             LAST_OPCODE(op_jmp);
3197         }
3198
3199         case op_jtrue: {
3200             unsigned relativeOffset = currentInstruction[2].u.operand;
3201             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
3202             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
3203             LAST_OPCODE(op_jtrue);
3204         }
3205
3206         case op_jfalse: {
3207             unsigned relativeOffset = currentInstruction[2].u.operand;
3208             Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
3209             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
3210             LAST_OPCODE(op_jfalse);
3211         }
3212
3213         case op_jeq_null: {
3214             unsigned relativeOffset = currentInstruction[2].u.operand;
3215             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
3216             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
3217             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
3218             LAST_OPCODE(op_jeq_null);
3219         }
3220
3221         case op_jneq_null: {
3222             unsigned relativeOffset = currentInstruction[2].u.operand;
3223             Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
3224             Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
3225             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
3226             LAST_OPCODE(op_jneq_null);
3227         }
3228
3229         case op_jless: {
3230             unsigned relativeOffset = currentInstruction[3].u.operand;
3231             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3232             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3233             Node* condition = addToGraph(CompareLess, op1, op2);
3234             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
3235             LAST_OPCODE(op_jless);
3236         }
3237
3238         case op_jlesseq: {
3239             unsigned relativeOffset = currentInstruction[3].u.operand;
3240             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3241             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3242             Node* condition = addToGraph(CompareLessEq, op1, op2);
3243             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
3244             LAST_OPCODE(op_jlesseq);
3245         }
3246
3247         case op_jgreater: {
3248             unsigned relativeOffset = currentInstruction[3].u.operand;
3249             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3250             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3251             Node* condition = addToGraph(CompareGreater, op1, op2);
3252             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
3253             LAST_OPCODE(op_jgreater);
3254         }
3255
3256         case op_jgreatereq: {
3257             unsigned relativeOffset = currentInstruction[3].u.operand;
3258             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3259             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3260             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3261             addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
3262             LAST_OPCODE(op_jgreatereq);
3263         }
3264
3265         case op_jnless: {
3266             unsigned relativeOffset = currentInstruction[3].u.operand;
3267             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3268             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3269             Node* condition = addToGraph(CompareLess, op1, op2);
3270             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
3271             LAST_OPCODE(op_jnless);
3272         }
3273
3274         case op_jnlesseq: {
3275             unsigned relativeOffset = currentInstruction[3].u.operand;
3276             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3277             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3278             Node* condition = addToGraph(CompareLessEq, op1, op2);
3279             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
3280             LAST_OPCODE(op_jnlesseq);
3281         }
3282
3283         case op_jngreater: {
3284             unsigned relativeOffset = currentInstruction[3].u.operand;
3285             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3286             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3287             Node* condition = addToGraph(CompareGreater, op1, op2);
3288             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
3289             LAST_OPCODE(op_jngreater);
3290         }
3291
3292         case op_jngreatereq: {
3293             unsigned relativeOffset = currentInstruction[3].u.operand;
3294             Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
3295             Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
3296             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
3297             addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
3298             LAST_OPCODE(op_jngreatereq);
3299         }
3300             
3301         case op_switch_imm: {
3302             SwitchData& data = *m_graph.m_switchData.add();
3303             data.kind = SwitchImm;
3304             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3305             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3306             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
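             // A branch offset of zero means the table has no case for that value (it takes the
             // fall-through), and cases that target the fall-through block are redundant, so
             // both are skipped.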
3307             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3308                 if (!table.branchOffsets[i])
3309                     continue;
3310                 unsigned target = m_currentIndex + table.branchOffsets[i];
3311                 if (target == data.fallThrough.bytecodeIndex())
3312                     continue;
3313                 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
3314             }
3315             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3316             flushIfTerminal(data);
3317             LAST_OPCODE(op_switch_imm);
3318         }
3319             
3320         case op_switch_char: {
3321             SwitchData& data = *m_graph.m_switchData.add();
3322             data.kind = SwitchChar;
3323             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
3324             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3325             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
3326             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
3327                 if (!table.branchOffsets[i])
3328                     continue;
3329                 unsigned target = m_currentIndex + table.branchOffsets[i];
3330                 if (target == data.fallThrough.bytecodeIndex())
3331                     continue;
3332                 data.cases.append(
3333                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
3334             }
3335             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3336             flushIfTerminal(data);
3337             LAST_OPCODE(op_switch_char);
3338         }
3339
3340         case op_switch_string: {
3341             SwitchData& data = *m_graph.m_switchData.add();
3342             data.kind = SwitchString;
3343             data.switchTableIndex = currentInstruction[1].u.operand;
3344             data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
3345             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
3346             StringJumpTable::StringOffsetTable::iterator iter;
3347             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
3348             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
3349                 unsigned target = m_currentIndex + iter->value.branchOffset;
3350                 if (target == data.fallThrough.bytecodeIndex())
3351                     continue;
3352                 data.cases.append(
3353                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
3354             }
3355             addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
3356             flushIfTerminal(data);
3357             LAST_OPCODE(op_switch_string);
3358         }
3359
3360         case op_ret:
3361             if (inlineCallFrame()) {
3362                 flushForReturn();
3363                 if (m_inlineStackTop->m_returnValue.isValid())
3364                     setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
3365                 m_inlineStackTop->m_didReturn = true;
3366                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
3367                     // If we're returning from the first block, then we're done parsing.
3368                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
3369                     shouldContinueParsing = false;
3370                     LAST_OPCODE(op_ret);
3371                 } else {
3372                     // If inlining created blocks, and we're doing a return, then we need some
3373                     // special linking.
3374                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3375                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
3376                 }
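                 // If this return is not the inlinee's final instruction (or we have already
                 // seen an early return), plant a Jump to be linked to the continuation block
                 // later.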
3377                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
3378                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
3379                     addToGraph(Jump, OpInfo(0));
3380                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
3381                     m_inlineStackTop->m_didEarlyReturn = true;
3382                 }
3383                 LAST_OPCODE(op_ret);
3384             }
3385             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3386             flushForReturn();
3387             LAST_OPCODE(op_ret);
3388             
3389         case op_end:
3390             ASSERT(!inlineCallFrame());
3391             addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
3392             flushForReturn();
3393             LAST_OPCODE(op_end);
3394
3395         case op_throw:
3396             addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
3397             flushForTerminal();
3398             addToGraph(Unreachable);
3399             LAST_OPCODE(op_throw);
3400             
3401         case op_throw_static_error:
3402             addToGraph(ThrowReferenceError);
3403             flushForTerminal();
3404             addToGraph(Unreachable);
3405             LAST_OPCODE(op_throw_static_error);
3406             
3407         case op_call:
3408             handleCall(currentInstruction, Call, CodeForCall);
3409             NEXT_OPCODE(op_call);
3410             
3411         case op_construct:
3412             handleCall(currentInstruction, Construct, CodeForConstruct);
3413             NEXT_OPCODE(op_construct);
3414             
3415         case op_call_varargs: {
3416             handleVarargsCall(currentInstruction, CallVarargs, CodeForCall);
3417             NEXT_OPCODE(op_call_varargs);
3418         }
3419             
3420         case op_construct_varargs: {
3421             handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct);
3422             NEXT_OPCODE(op_construct_varargs);
3423         }
3424             
3425         case op_jneq_ptr:
3426             // Statically speculate for now. It makes sense to let speculation-only jneq_ptr
3427             // support prove itself for a while before making it more general, since it's
3428             // already gnarly enough as it is.
3429             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
3430             addToGraph(
3431                 CheckCell,
3432                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
3433                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
3434                 get(VirtualRegister(currentInstruction[1].u.operand)));
3435             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
3436             LAST_OPCODE(op_jneq_ptr);
3437
3438         case op_resolve_scope: {
3439             int dst = currentInstruction[1].u.operand;
3440             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
3441             unsigned depth = currentInstruction[5].u.operand;
3442
3443             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
3444             if (needsVarInjectionChecks(resolveType))
3445                 addToGraph(VarInjectionWatchpoint);
3446
3447             switch (resolveType) {
3448             case GlobalProperty:
3449             case GlobalVar:
3450             case GlobalPropertyWithVarInjectionChecks:
3451             case GlobalVarWithVarInjectionChecks:
3452                 set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
3453                 if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks)
3454                     addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
3455                 break;
3456             case LocalClosureVar:
3457             case ClosureVar:
3458             case ClosureVarWithVarInjectionChecks: {
3459                 Node* localBase = get(VirtualRegister(currentInstruction[2].u.operand));
3460                 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.
3461                 
3462                 // We have various forms of constant folding here. This is necessary to avoid
3463                 // spurious recompiles in dead-but-foldable code.
3464                 if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
3465                     InferredValue* singleton = symbolTable->singletonScope();
3466                     if (JSValue value = singleton->inferredValue()) {
3467                         m_graph.watchpoints().addLazily(singleton);
3468                         set(VirtualRegister(dst), weakJSConstant(value));
3469                         break;
3470                     }
3471                 }
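                 // If the scope base is already a compile-time constant, walk the scope chain
                 // now instead of emitting SkipScope nodes.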
3472                 if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) {
3473                     for (unsigned n = depth; n--;)
3474                         scope = scope->next();
3475                     set(VirtualRegister(dst), weakJSConstant(scope));
3476                     break;
3477                 }
3478                 for (unsigned n = depth; n--;)
3479                     localBase = addToGraph(SkipScope, localBase);
3480                 set(VirtualRegister(dst), localBase);
3481                 break;
3482             }
3483             case Dynamic:
3484                 RELEASE_ASSERT_NOT_REACHED();
3485                 break;
3486             }
3487             NEXT_OPCODE(op_resolve_scope);
3488         }
3489
3490         case op_get_from_scope: {
3491             int dst = currentInstruction[1].u.operand;
3492             int scope = currentInstruction[2].u.operand;
3493             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
3494             AtomicStringImpl* uid = m_graph.identifiers()[identifierNumber];
3495             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3496
3497             Structure* structure = 0;
3498             WatchpointSet* watchpoints = 0;
3499             uintptr_t operand;
3500             {
3501                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3502                 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
3503                     watchpoints = currentInstruction[5].u.watchpointSet;
3504                 else
3505                     structure = currentInstruction[5].u.structure.get();
3506                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3507             }
3508
3509             UNUSED_PARAM(watchpoints); // We will use this in the future. For now, setting it documents that index 5 holds the watchpoint set in GlobalVar mode.
3510
3511             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3512
3513             switch (resolveType) {
3514             case GlobalProperty:
3515             case GlobalPropertyWithVarInjectionChecks: {
3516                 SpeculatedType prediction = getPrediction();
3517                 GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
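                 // Only inline the load when profiling yields a single simple variant with one
                 // structure; otherwise fall back to a generic GetByIdFlush on the scope.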
3518                 if (status.state() != GetByIdStatus::Simple
3519                     || status.numVariants() != 1
3520                     || status[0].structureSet().size() != 1) {
3521                     set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
3522                     break;
3523                 }
3524                 Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
3525                 addToGraph(Phantom, get(VirtualRegister(scope)));
3526                 set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
3527                 break;
3528             }
3529             case GlobalVar:
3530             case GlobalVarWithVarInjectionChecks: {
3531                 addToGraph(Phantom, get(VirtualRegister(scope)));
3532                 WatchpointSet* watchpointSet;
3533                 ScopeOffset offset;
3534                 {
3535                     ConcurrentJITLocker locker(globalObject->symbolTable()->m_lock);
3536                     SymbolTableEntry entry = globalObject->symbolTable()->get(locker, uid);
3537                     watchpointSet = entry.watchpointSet();
3538                     offset = entry.scopeOffset();
3539                 }
3540                 if (watchpointSet && watchpointSet->state() == IsWatched) {
3541                     // This has a fun concurrency story. There is the possibility of a race in two
3542                     // directions:
3543                     //
3544                     // We see that the set IsWatched, but in the meantime it gets invalidated: this is
3545                     // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
3546                     // invalidated, then this compilation is invalidated. Note that in the meantime we
3547                     // may load an absurd value from the global object. It's fine to load an absurd
3548                     // value if the compilation is invalidated anyway.
3549                     //
3550                     // We see that the set IsWatched, but the value isn't yet initialized: this isn't
3551                     // possible because of the ordering of operations.
3552                     //
3553                     // Here's how we order operations:
3554                     //
3555                     // Main thread stores to the global object: always store a value first, and only
3556                     // after that do we touch the watchpoint set. There is a fence in the touch, that
3557                     // ensures that the store to the global object always happens before the touch on the
3558                     // set.
3559                     //
3560                     // Compilation thread: always first load the state of the watchpoint set, and then
3561                     // load the value. The WatchpointSet::state() method does fences for us to ensure
3562                     // that the load of the state happens before our load of the value.
3563                     //
3564                     // Finalizing compilation: this happens on the main thread and synchronously checks
3565                     // validity of all watchpoint sets.
3566                     //
3567                     // We will only perform optimizations if the load of the state yields IsWatched. That
3568                     // means that at least one store would have happened to initialize the original value
3569                     // of the variable (that is, the value we'd like to constant fold to). There may be
3570                     // other stores that happen after that, but those stores will invalidate the
3571                     // watchpoint set and also the compilation.
3572                     
3573                     // Note that we need to use the operand, which is a direct pointer to the global,
3574                     // rather than looking up the global by doing variableAt(offset). That's because the
3575                     // internal data structures of JSSegmentedVariableObject are not thread-safe even
3576                     // though accessing the global itself is. The segmentation involves a vector spine
3577