/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "ArrayConstructor.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
#include "GetByIdStatus.h"
#include "Operations.h"
#include "PreciseJumpTargets.h"
#include "PutByIdStatus.h"
#include "StringConstructor.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

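// ConstantBufferKey is a (CodeBlock*, constant buffer index) pair. It exists so
// that constant buffers can be used as hash-map keys; see the parser's
// m_constantBufferCache below, which maps these keys to remapped buffer indices.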
class ConstantBufferKey {
public:
    ConstantBufferKey()
        : m_codeBlock(0)
        , m_index(0)
    {
    }
    
    ConstantBufferKey(WTF::HashTableDeletedValueType)
        : m_codeBlock(0)
        , m_index(1)
    {
    }
    
    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
        : m_codeBlock(codeBlock)
        , m_index(index)
    {
    }
    
    bool operator==(const ConstantBufferKey& other) const
    {
        return m_codeBlock == other.m_codeBlock
            && m_index == other.m_index;
    }
    
    unsigned hash() const
    {
        return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
    }
    
    bool isHashTableDeletedValue() const
    {
        return !m_codeBlock && m_index;
    }
    
    CodeBlock* codeBlock() const { return m_codeBlock; }
    unsigned index() const { return m_index; }
    
private:
    CodeBlock* m_codeBlock;
    unsigned m_index;
};

struct ConstantBufferKeyHash {
    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
    {
        return a == b;
    }
    
    static const bool safeToCompareToEmptyOrDeleted = true;
};

} } // namespace JSC::DFG

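// Teach WTF's hash tables how to hash and store ConstantBufferKey, so that it
// can be used directly as a HashMap key type.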
namespace WTF {

template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
    typedef JSC::DFG::ConstantBufferKeyHash Hash;
};

template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };

} // namespace WTF

namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_vm(&graph.m_vm)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(m_codeBlock->numberOfConstantRegisters())
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_preservedVars(m_codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
        , m_emptyJSValueIndex(UINT_MAX)
        , m_currentInstruction(0)
    {
        ASSERT(m_profiledBlock);
        
        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }
    
    // Parse a full CodeBlock of bytecode.
    bool parse();
    
private:
    struct InlineStackEntry;

    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
    
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
    bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
    void handleGetByOffset(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        PropertyOffset);
    void handleGetById(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        const GetByIdStatus&);

    Node* getScope(bool skipTop, unsigned skipCount);
    
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
    
    VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
    {
        ASSERT(operand < FirstConstantRegisterIndex);
        
        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
        return &m_graph.m_variableAccessData.last();
    }
    
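    // A note on operand encoding, inferred from the checks below: operands at or
    // above FirstConstantRegisterIndex name entries in the constant pool, operands
    // for which operandIsArgument() holds name arguments, and everything else
    // names a local.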
    // Get/Set the operands/result of a bytecode instruction.
    Node* getDirect(int operand)
    {
        // Is this a constant?
        if (operand >= FirstConstantRegisterIndex) {
            unsigned constant = operand - FirstConstantRegisterIndex;
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }

        ASSERT(operand != JSStack::Callee);
        
        // Is this an argument?
        if (operandIsArgument(operand))
            return getArgument(operand);

        // Must be a local.
        return getLocal((unsigned)operand);
    }
    Node* get(int operand)
    {
        if (operand == JSStack::Callee) {
            if (inlineCallFrame() && inlineCallFrame()->callee)
                return cellConstant(inlineCallFrame()->callee.get());
            
            return getCallee();
        }
        
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    enum SetMode { NormalSet, SetOnEntry };
    void setDirect(int operand, Node* value, SetMode setMode = NormalSet)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value, setMode);
            return;
        }

        // Must be a local.
        setLocal((unsigned)operand, value, setMode);
    }
    void set(int operand, Node* value, SetMode setMode = NormalSet)
    {
        setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }
    
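    // Attach a lazy-operand value-profile prediction to a freshly created
    // GetLocal, reading the profile under the profiled block's lock since the
    // profile may be updated concurrently.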
    Node* injectLazyOperandSpeculation(Node* node)
    {
        ASSERT(node->op() == GetLocal);
        ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex);
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        LazyOperandValueProfileKey key(m_currentIndex, node->local());
        SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Lazy operand [@", node->index(), ", bc#", m_currentIndex, ", r", node->local(), "] prediction: ", SpeculationDump(prediction), "\n");
#endif
        node->variableAccessData()->predict(prediction);
        return node;
    }

    // Used in implementing get/set, above, where the operand is a local variable.
    Node* getLocal(unsigned operand)
    {
        Node* node = m_currentBlock->variablesAtTail.local(operand);
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        // This has two goals: 1) link together variable access datas, and 2)
        // try to avoid creating redundant GetLocals. (1) is required for
        // correctness - no other phase will ensure that block-local variable
        // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
            
            if (!isCaptured) {
                switch (node->op()) {
                case GetLocal:
                    return node;
                case SetLocal:
                    return node->child1().node();
                default:
                    break;
                }
            }
        } else {
            m_preservedVars.set(operand);
            variable = newVariableAccessData(operand, isCaptured);
        }
        
        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.local(operand) = node;
        return node;
    }
    void setLocal(unsigned operand, Node* value, SetMode setMode = NormalSet)
    {
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        if (setMode == NormalSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (isCaptured || argumentPosition)
                flushDirect(operand, argumentPosition);
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(operand) = node;
    }

    // Used in implementing get/set, above, where the operand is an argument.
    Node* getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);
        
        Node* node = m_currentBlock->variablesAtTail.argument(argument);
        bool isCaptured = m_codeBlock->isCaptured(operand);

        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
            
            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    void setArgument(int operand, Node* value, SetMode setMode = NormalSet)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);
        
        bool isCaptured = m_codeBlock->isCaptured(operand);

        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);

        // Always flush arguments, except for 'this'. If 'this' is created by us,
        // then make sure that it's never unboxed.
        if (argument) {
            if (setMode == NormalSet)
                flushDirect(operand);
        } else if (m_codeBlock->specializationKind() == CodeForConstruct)
            variableAccessData->mergeShouldNeverUnbox(true);
        
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = node;
    }
    
    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }
    
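    // Walk the inline stack to see whether this "local" is really an argument of
    // some inlined call frame; 'this' and operands outside a frame's argument
    // slots are skipped. Returns 0 if the operand is an ordinary local.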
    ArgumentPosition* findArgumentPositionForLocal(int operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
                continue;
            if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                continue;
            if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
                continue;
            int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }
    
    ArgumentPosition* findArgumentPosition(int operand)
    {
        if (operandIsArgument(operand))
            return findArgumentPositionForArgument(operandToArgument(operand));
        return findArgumentPositionForLocal(operand);
    }

    void addConstant(JSValue value)
    {
        unsigned constantIndex = m_codeBlock->addConstantLazily();
        initializeLazyWriteBarrierForConstant(
            m_graph.m_plan.writeBarriers,
            m_codeBlock->constants()[constantIndex],
            m_codeBlock,
            constantIndex,
            m_codeBlock->ownerExecutable(),
            value);
    }
    
    void flush(int operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }
    
    void flushDirect(int operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }
    
    void flushDirect(int operand, ArgumentPosition* argumentPosition)
    {
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        ASSERT(operand < FirstConstantRegisterIndex);
        
        if (!operandIsArgument(operand))
            m_preservedVars.set(operand);
        
        Node* node = m_currentBlock->variablesAtTail.operand(operand);
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = addToGraph(Flush, OpInfo(variable));
        m_currentBlock->variablesAtTail.operand(operand) = node;
        if (argumentPosition)
            argumentPosition->addVariable(variable);
    }

    void flush(InlineStackEntry* inlineStackEntry)
    {
        int numArguments;
        if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame)
            numArguments = inlineCallFrame->arguments.size();
        else
            numArguments = inlineStackEntry->m_codeBlock->numParameters();
        for (unsigned argument = numArguments; argument-- > 1;)
            flushDirect(inlineStackEntry->remapOperand(argumentToOperand(argument)));
        for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
            if (!inlineStackEntry->m_codeBlock->isCaptured(local))
                continue;
            flushDirect(inlineStackEntry->remapOperand(local));
        }
    }

    void flushAllArgumentsAndCapturedVariablesInInlineStack()
    {
        for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
            flush(inlineStackEntry);
    }

    void flushArgumentsAndCapturedVariables()
    {
        flush(m_inlineStackTop);
    }

    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    Node* getToInt32(int operand)
    {
        return toInt32(get(operand));
    }

    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    Node* toInt32(Node* node)
    {
        if (node->hasInt32Result())
            return node;

        if (node->op() == UInt32ToNumber)
            return node->child1().node();

        // Check for numeric constants boxed as JSValues.
        if (canFold(node)) {
            JSValue v = valueOfJSConstant(node);
            if (v.isInt32())
                return getJSConstant(node->constantNumber());
            if (v.isNumber())
                return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
        }

        return addToGraph(ValueToInt32, node);
    }

    // NOTE: Only use this to construct constants that arise from non-speculative
    // constant folding. That is, it would be a bad idea to use this to create
    // constants if we had constant field inference, since the bytecode parser's
    // folding doesn't handle liveness preservation.
    Node* getJSConstantForValue(JSValue constantValue)
    {
        unsigned constantIndex;
        if (!m_codeBlock->findConstant(constantValue, constantIndex)) {
            addConstant(constantValue);
            m_constants.append(ConstantRecord());
        }
        
        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        
        return getJSConstant(constantIndex);
    }

    Node* getJSConstant(unsigned constant)
    {
        Node* node = m_constants[constant].asJSValue;
        if (node)
            return node;

        Node* result = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = result;
        return result;
    }

    Node* getCallee()
    {
        return addToGraph(GetCallee);
    }

    // Helper functions to get/set the this value.
    Node* getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    void setThis(Node* value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    // Convenience methods for checking nodes for constants.
    bool isJSConstant(Node* node)
    {
        return node->op() == JSConstant;
    }
    bool isInt32Constant(Node* node)
    {
        return isJSConstant(node) && valueOfJSConstant(node).isInt32();
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(Node* node)
    {
        ASSERT(isJSConstant(node));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
    }
    int32_t valueOfInt32Constant(Node* node)
    {
        ASSERT(isInt32Constant(node));
        return valueOfJSConstant(node).asInt32();
    }
    
    // This method returns a JSConstant with the value 'undefined'.
    Node* constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }

            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }

    // This method returns a JSConstant with the value 'null'.
    Node* constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }

            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }

    // This method returns a JSConstant with the integer value 1.
    Node* one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
    
    // This method returns a JSConstant with the value NaN.
    Node* constantNaN()
    {
        JSValue nan = jsNaN();
        
        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN; if we find it, we can just reuse it!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value NaN to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value NaN.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
    
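    // Return the WeakJSConstant node for a cell, creating it on first use; the
    // node is cached in m_cellConstantNodes so each cell gets a single node.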
    Node* cellConstant(JSCell* cell)
    {
        HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, 0);
        if (result.isNewEntry)
            result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
        
        return result.iterator->value;
    }
    
    InlineCallFrame* inlineCallFrame()
    {
        return m_inlineStackTop->m_inlineCallFrame;
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, inlineCallFrame());
    }
    
    bool canFold(Node* node)
    {
        return node->isStronglyProvedConstantIn(inlineCallFrame());
    }

    // Our codegen for constant strict equality performs a bitwise comparison,
    // so we can only select values that have a consistent bitwise identity.
    bool isConstantForCompareStrictEq(Node* node)
    {
        if (!node->isConstant())
            return false;
        JSValue value = valueOfJSConstant(node);
        return value.isBoolean() || value.isUndefinedOrNull();
    }
    
    Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3));
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), child1, child2, child3);
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3));
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), info1, info2,
            Edge(child1), Edge(child2), Edge(child3));
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    
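    // The VarArg form takes its children from m_varArgChildren: the children of
    // this node are the last m_numPassedVarArgs entries, accumulated via
    // addVarArgChild() below, and the counter resets once they are consumed.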
    Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        Node* result = m_graph.addNode(
            SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2,
            m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        
        m_numPassedVarArgs = 0;
        
        return result;
    }

    void addVarArgChild(Node* child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }
    
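    // Build a var-arg Call/Construct node for a call-family opcode: the callee
    // is the first var-arg child, followed by the arguments (the 'this' slot is
    // skipped for Construct), and m_parameterSlots is grown to cover the frame.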
    Node* addCall(Instruction* currentInstruction, NodeType op)
    {
        SpeculatedType prediction = getPrediction();
        
        addVarArgChild(get(currentInstruction[2].u.operand));
        int argCount = currentInstruction[3].u.operand;
        if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;

        int registerOffset = currentInstruction[4].u.operand;
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));

        Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        set(currentInstruction[1].u.operand, call);
        return call;
    }
    
    Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
    {
        Node* objectNode = cellConstant(object);
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
        return objectNode;
    }
    
    Node* cellConstantWithStructureCheck(JSCell* object)
    {
        return cellConstantWithStructureCheck(object, object->structure());
    }

    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
    }

    SpeculatedType getPrediction(unsigned bytecodeIndex)
    {
        SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
        
        if (prediction == SpecNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }
        
        return prediction;
    }
    
    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_currentIndex);
    }
    
    SpeculatedType getPrediction()
    {
        return getPrediction(m_currentIndex);
    }
    
    ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        return ArrayMode::fromObserved(locker, profile, action, false);
    }
    
    ArrayMode getArrayMode(ArrayProfile* profile)
    {
        return getArrayMode(profile, Array::Read);
    }
    
    ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        
        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles())
            dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
        dataLogF("Array profile for bc#%u: %u %s%s\n", m_currentIndex, profile->observedArrayModes(locker), profile->structureIsPolymorphic(locker) ? " (polymorphic)" : "", profile->mayInterceptIndexedAccesses(locker) ? " (may intercept)" : "");
#endif
        
        bool makeSafe =
            m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
            || profile->outOfBounds(locker);
        
        ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
        
        return result;
    }
    
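    // Decorate an arithmetic node with NodeMayOverflow/NodeMayNegZero flags when
    // profiling (slow-case counters or prior OSR exits) says the fast, unchecked
    // path is likely to fail.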
    Node* makeSafe(Node* node)
    {
        bool likelyToTakeSlowCase;
        if (!isX86() && node->op() == ArithMod)
            likelyToTakeSlowCase = false;
        else
            likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
        
        if (!likelyToTakeSlowCase
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return node;
        
        switch (node->op()) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ArithNegate:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw a double.
            node->mergeFlags(NodeMayOverflow);
            break;
            
        case ArithMul:
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("Making ArithMul @%u take deepest slow case.\n", node->index());
#endif
                node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("Making ArithMul @%u take faster slow case.\n", node->index());
#endif
                node->mergeFlags(NodeMayNegZero);
            }
            break;
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        
        return node;
    }
    
    Node* makeDivSafe(Node* node)
    {
        ASSERT(node->op() == ArithDiv);
        
        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.
        
        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return node;
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(node->op()), node->index(), m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
        
        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
        
        return node;
    }
    
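    // For a non-direct put, check that the prototype chain recorded in the
    // StructureChain still matches the live structures; a direct put never
    // consults the chain, so it is trivially valid.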
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;
        
        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;
        
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }
        
        return true;
    }
    
    void buildOperandMapsIfNecessary();
    
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, Node*> m_cellConstantNodes;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(0)
            , asNumeric(0)
            , asJSValue(0)
        {
        }

        Node* asInt32;
        Node* asNumeric;
        Node* asJSValue;
    };

    // One ConstantRecord per entry in the CodeBlock's constant pool, tracking
    // the nodes (if any) that currently represent that constant.
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, but only if this is not a leaf function. (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;

    HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
    
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;
        
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        
        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
        
        QueryableExitProfile m_exitProfile;
        
        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;
        Vector<unsigned> m_constantBufferRemap;
        Vector<unsigned> m_switchRemap;
        
        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;
        
        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BasicBlock*> m_blockLinkingTargets;
        
        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BasicBlock* m_callsiteBlockHead;
        
        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;
        
        VirtualRegister m_returnValue;
        
        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;
        
        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;
        
        // Did we have any early returns?
        bool m_didEarlyReturn;
        
        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;
        
        InlineStackEntry* m_caller;
        
        InlineStackEntry(
            ByteCodeParser*,
            CodeBlock*,
            CodeBlock* profiledBlock,
            BasicBlock* callsiteBlockHead,
            JSFunction* callee, // Null if this is a closure call.
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            CodeSpecializationKind);
        
        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        
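        // Translate an operand from the inlined code block's numbering into the
        // machine code block's numbering: constants go through m_constantRemap,
        // while arguments and locals are shifted by the inline frame's stack
        // offset. E.g. (a sketch) local 3 in a frame at stackOffset 10 becomes
        // machine operand 13.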
        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;
            
            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }

            ASSERT(operand != JSStack::Callee);

            return operand + m_inlineCallFrame->stackOffset;
        }
    };
    
    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    BorrowedIdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
    // work-around for the fact that JSValueMap can't handle "empty" values.
    unsigned m_emptyJSValueIndex;
    
    Instruction* m_currentInstruction;
};

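// Each opcode handler in parseBlock() ends with one of these: both advance
// m_currentIndex past the current opcode, then either continue the dispatch
// loop (NEXT_OPCODE) or return from parseBlock() (LAST_OPCODE).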
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing

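// Lower a call-family opcode. Depending on what the CallLinkStatus proves, this
// either folds the call into a constant internal function or an intrinsic,
// inlines the callee, or falls back to planting a generic Call/Construct node.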
void ByteCodeParser::handleCall(Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    
    Node* callTarget = get(currentInstruction[2].u.operand);
    
    CallLinkStatus callLinkStatus;

    if (m_graph.isConstant(callTarget))
        callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
    else {
        callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex);
        callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction));
        callLinkStatus.setHasBadCacheExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
        callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
    }
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("For call at bc#", m_currentIndex, ": ", callLinkStatus, "\n");
#endif
    
    if (!callLinkStatus.canOptimize()) {
        // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
        // that we cannot optimize them.
        
        addCall(currentInstruction, op);
        return;
    }
    
    int argumentCountIncludingThis = currentInstruction[3].u.operand;
    int registerOffset = currentInstruction[4].u.operand;

    int resultOperand = currentInstruction[1].u.operand;
    unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
    SpeculatedType prediction = getPrediction();

    if (InternalFunction* function = callLinkStatus.internalFunction()) {
        if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
            // This phantoming has to be *after* the code for the constant internal
            // function, to signify that the inputs must be kept alive whatever exits it may do.
            addToGraph(Phantom, callTarget);
            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
            return;
        }
        
        // Can only handle this using the generic call handler.
        addCall(currentInstruction, op);
        return;
    }
        
    Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
    if (intrinsic != NoIntrinsic) {
        emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
            
        if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
            // This phantoming has to be *after* the code for the intrinsic, to signify that
            // the inputs must be kept alive whatever exits the intrinsic may do.
            addToGraph(Phantom, callTarget);
            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
            if (m_graph.compilation())
                m_graph.compilation()->noticeInlinedCall();
            return;
        }
    } else if (handleInlining(callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedCall();
        return;
    }
    
    addCall(currentInstruction, op);
}

void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
{
    Node* thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(registerOffset + argumentToOperand(0));
    else
        thisArgument = 0;

    if (callLinkStatus.isProved()) {
        addToGraph(Phantom, callTarget, thisArgument);
        return;
    }
    
    ASSERT(callLinkStatus.canOptimize());
    
    if (JSFunction* function = callLinkStatus.function())
        addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
    else {
        ASSERT(callLinkStatus.structure());
        ASSERT(callLinkStatus.executable());
        
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
        addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
    }
}

void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
{
    for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
        addToGraph(Phantom, get(registerOffset + argumentToOperand(i)));
}

bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    // First, the really simple checks: do we have an actual JS function?
    if (!callLinkStatus.executable())
        return false;
    if (callLinkStatus.executable()->isHostFunction())
        return false;
    
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
    
    // Does the number of arguments we're passing match the arity of the target? We currently
    // inline only if the number of arguments passed is greater than or equal to the number of
    // arguments expected.
    if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis)
        return false;
    
    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
    // If either of these is detected, then don't inline.
1246     unsigned depth = 0;
1247     for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
1248         ++depth;
1249         if (depth >= Options::maximumInliningDepth())
1250             return false; // Depth exceeded.
1251         
1252         if (entry->executable() == executable)
1253             return false; // Recursion detected.
1254     }
1255     
1256     // Do we have a code block, and does the code block's size match the heuristics/requirements for
1257     // being an inline candidate? We might not have a code block if code was thrown away or if we
1258     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
1259     // if we had a static proof of what was being called; this might happen for example if you call a
1260     // global function, where watchpointing gives us static information. Overall, it's a rare case
1261     // because we expect that any hot callees would have already been compiled.
1262     CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
1263     if (!codeBlock)
1264         return false;
1265     if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall()))
1266         return false;
1267     
1268 #if DFG_ENABLE(DEBUG_VERBOSE)
1269     dataLogF("Inlining executable %p.\n", executable);
1270 #endif
1271     
1272     // Now we know without a doubt that we are committed to inlining. So begin the process
1273     // by checking the callee (if necessary) and making sure that arguments and the callee
1274     // are flushed.
1275     emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
1276     
1277     // FIXME: Don't flush constants!
1278     
1279     int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
1280     
1281     // Make sure that the area used by the call frame is reserved.
1282     for (int arg = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
1283         m_preservedVars.set(arg);
1284     
1285     // Make sure that we have enough locals.
1286     unsigned newNumLocals = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
1287     if (newNumLocals > m_numLocals) {
1288         m_numLocals = newNumLocals;
1289         for (size_t i = 0; i < m_graph.numBlocks(); ++i)
1290             m_graph.block(i)->ensureLocals(newNumLocals);
1291     }
1292     
1293     size_t argumentPositionStart = m_graph.m_argumentPositions.size();
1294
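    // Note: constructing the InlineStackEntry takes over from here. It remaps the
    // inlinee's operands, identifiers, and constant buffers into the caller's
    // numbering, and installs itself as m_inlineStackTop for the duration of
    // parseCodeBlock() below.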
    InlineStackEntry inlineStackEntry(
        this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(),
        (VirtualRegister)m_inlineStackTop->remapOperand(resultOperand),
        (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);

    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    m_currentIndex = 0;

    addToGraph(InlineStart, OpInfo(argumentPositionStart));
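    // For a closure call the callee and its scope are not compile-time constants,
    // so record them explicitly in the inlined frame.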
    if (callLinkStatus.isClosureCall()) {
        addToGraph(SetCallee, callTargetNode);
        addToGraph(SetMyScope, addToGraph(GetScope, callTargetNode));
    }

    parseCodeBlock();

    m_currentIndex = oldIndex;

    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {

        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);

        // It's possible that the callsite block head is not owned by the caller.
        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
            // It's definitely owned by the caller, because the caller created new blocks.
            // Assert that this all adds up.
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        } else {
            // It's definitely not owned by the caller. Tell the caller that it does not
            // need to link its callsite block head, because we did it for it.
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
        }

        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());

    BasicBlock* lastBlock = m_graph.lastBlock();
    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());

        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
        }

        m_currentBlock = m_graph.lastBlock();

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
        return true;
    }

    // If we get to this point then all blocks must end in some sort of terminal.
    ASSERT(lastBlock->last()->isTerminal());

    // Need to create a new basic block for the continuation at the caller.
    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals));

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.numBlocks(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
#endif

    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
        ASSERT(!blockToLink->isLinked);
        Node* node = blockToLink->last();
        ASSERT(node->op() == Jump);
        ASSERT(node->takenBlock() == 0);
        node->setTakenBlock(block.get());
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        blockToLink->isLinked = true;
#endif
    }

    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
    m_graph.appendBlock(block);
    prepareToParseBlock();

    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
    return true;
}

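// Inlines the easy arities of Math.min and Math.max (op is ArithMin or ArithMax):
// no arguments yields NaN, one argument is returned as-is (with a Phantom asserting
// number use), and two arguments map directly onto the binary arithmetic node.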
bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
    if (argumentCountIncludingThis == 1) { // Math.min()
        set(resultOperand, constantNaN());
        return true;
    }

    if (argumentCountIncludingThis == 2) { // Math.min(x)
        Node* result = get(registerOffset + argumentToOperand(1));
        addToGraph(Phantom, Edge(result, NumberUse));
        set(resultOperand, result);
        return true;
    }

    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
        set(resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
        return true;
    }

    // Don't handle calls with three or more actual arguments for now.
    return false;
}

// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
// they need to perform the ToNumber conversion, which can have side-effects.
bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.abs()
            set(resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

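        // ArithAbs overflows for the minimum int32 value, so if profiling has seen an
        // Overflow exit at this call site, flag the node as possibly overflowing.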
        Node* node = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflow);
        set(resultOperand, node);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);

    case MaxIntrinsic:
        return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);

    case SqrtIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.sqrt()
            set(resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointSqrt())
            return false;

        set(resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
        return true;
    }

    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

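        // The array profile is read out of the call instruction itself; only inline
        // the push for JSArray indexing shapes that ArrayPush understands.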
        ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Undecided:
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
            set(resultOperand, arrayPush);

            return true;
        }

        default:
            return false;
        }
    }

    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;

        ArrayMode arrayMode = getArrayMode(m_currentInstruction[6].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
            set(resultOperand, arrayPop);
            return true;
        }

        default:
            return false;
        }
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        int indexOperand = registerOffset + argumentToOperand(1);
        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));

        set(resultOperand, charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        int indexOperand = registerOffset + argumentToOperand(1);
        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));

        set(resultOperand, charCode);
        return true;
    }
    case FromCharCodeIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        Node* charCode = addToGraph(StringFromCharCode, getToInt32(indexOperand));

        set(resultOperand, charCode);

        return true;
    }

    case RegExpExecIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        set(resultOperand, regExpExec);

        return true;
    }

    case RegExpTestIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        set(resultOperand, regExpExec);

        return true;
    }

    case IMulIntrinsic: {
        if (argumentCountIncludingThis != 3)
            return false;
        int leftOperand = registerOffset + argumentToOperand(1);
        int rightOperand = registerOffset + argumentToOperand(2);
        Node* left = getToInt32(leftOperand);
        Node* right = getToInt32(rightOperand);
        set(resultOperand, addToGraph(ArithIMul, left, right));
        return true;
    }

    default:
        return false;
    }
}

bool ByteCodeParser::handleConstantInternalFunction(
    int resultOperand, InternalFunction* function, int registerOffset,
    int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
{
    // If we ever find that we have a lot of internal functions that we specialize for,
    // then we should probably have some sort of hashtable dispatch, or maybe even
    // dispatch straight through the MethodTable of the InternalFunction. But for now,
    // it seems that this case is hit infrequently enough, and the number of functions
    // we know about is small enough, that having just a linear cascade of if statements
    // is good enough.

    UNUSED_PARAM(prediction); // Remove this once we do more things.

    if (function->classInfo() == ArrayConstructor::info()) {
        if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
            return false;

        if (argumentCountIncludingThis == 2) {
            set(resultOperand,
                addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(registerOffset + argumentToOperand(1))));
            return true;
        }

        for (int i = 1; i < argumentCountIncludingThis; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));
        set(resultOperand,
            addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
        return true;
    } else if (function->classInfo() == StringConstructor::info()) {
        Node* result;

        if (argumentCountIncludingThis <= 1)
            result = cellConstant(m_vm->smallStrings.emptyString());
        else
            result = addToGraph(ToString, get(registerOffset + argumentToOperand(1)));

        if (kind == CodeForConstruct)
            result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);

        set(resultOperand, result);
        return true;
    }

    return false;
}

Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
{
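    // Inline property offsets live directly in the object cell, so the cell itself
    // doubles as the property storage; out-of-line offsets live in the butterfly.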
    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);
    Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);

    StorageAccessData storageAccessData;
    storageAccessData.offset = offset;
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);

    return getByOffset;
}

void ByteCodeParser::handleGetByOffset(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    PropertyOffset offset)
{
    set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
}

Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
{
    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);
    Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);

    StorageAccessData storageAccessData;
    storageAccessData.offset = offset;
    storageAccessData.identifierNumber = identifier;
    m_graph.m_storageAccessData.append(storageAccessData);

    return result;
}

void ByteCodeParser::handleGetById(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    const GetByIdStatus& getByIdStatus)
{
    if (!getByIdStatus.isSimple()
        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)) {
        set(destinationOperand,
            addToGraph(
                getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
                OpInfo(identifierNumber), OpInfo(prediction), base));
        return;
    }

    ASSERT(getByIdStatus.structureSet().size());

    // The implementation of GetByOffset does not know to terminate speculative
    // execution if it doesn't have a prediction, so we do it manually.
    if (prediction == SpecNone)
        addToGraph(ForceOSRExit);
    else if (m_graph.compilation())
        m_graph.compilation()->noticeInlinedGetById();

    Node* originalBaseForBaselineJIT = base;

    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);

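    // If the access walks a prototype chain, pin each prototype with a structure
    // check; the last checked prototype then becomes the base for the actual load.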
    if (getByIdStatus.chain()) {
        m_graph.chains().addLazily(getByIdStatus.chain());
        Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
        JSObject* currentObject = 0;
        for (unsigned i = 0; i < getByIdStatus.chain()->size(); ++i) {
            currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
            currentStructure = getByIdStatus.chain()->at(i);
            base = cellConstantWithStructureCheck(currentObject, currentStructure);
        }
    }

    // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
    // ensure that the base of the original get_by_id is kept alive until we're done with
    // all of the speculations. We only insert the Phantom if there had been a CheckStructure
    // on something other than the base following the CheckStructure on base, or if the
    // access was compiled to a WeakJSConstant specific value, in which case we might not
    // have any explicit use of the base at all.
    if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
        addToGraph(Phantom, originalBaseForBaselineJIT);

    if (getByIdStatus.specificValue()) {
        ASSERT(getByIdStatus.specificValue().isCell());

        set(destinationOperand, cellConstant(getByIdStatus.specificValue().asCell()));
        return;
    }

    handleGetByOffset(
        destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
}

void ByteCodeParser::prepareToParseBlock()
{
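    // Constant-to-node caches are only valid within a single basic block, so reset
    // them before we start parsing a new one.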
    for (unsigned i = 0; i < m_constants.size(); ++i)
        m_constants[i] = ConstantRecord();
    m_cellConstantNodes.clear();
}

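// Builds the scope chain walk for resolves: start from the callee's scope (a
// compile-time constant when inlining a non-closure call), optionally skip the top
// activation, then peel off skipCount further scopes.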
Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
{
    Node* localBase;
    if (inlineCallFrame() && !inlineCallFrame()->isClosureCall()) {
        ASSERT(inlineCallFrame()->callee);
        localBase = cellConstant(inlineCallFrame()->callee->scope());
    } else
        localBase = addToGraph(GetMyScope);
    if (skipTop) {
        ASSERT(!inlineCallFrame());
        localBase = addToGraph(SkipTopScope, localBase);
    }
    for (unsigned n = skipCount; n--;)
        localBase = addToGraph(SkipScope, localBase);
    return localBase;
}

bool ByteCodeParser::parseBlock(unsigned limit)
{
    bool shouldContinueParsing = true;

    Interpreter* interpreter = m_vm->interpreter;
    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;

    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track whether a use of an argument observes the value actually passed,
    // as opposed to a value we set explicitly.
    if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            VariableAccessData* variable = newVariableAccessData(
                argumentToOperand(argument), m_codeBlock->isCaptured(argumentToOperand(argument)));
            variable->mergeStructureCheckHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
            variable->mergeCheckArrayHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));

            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
        }
    }

    while (true) {
        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            // Ordinarily we want to plant a jump. But refuse to do this if the block is
            // empty. This is a special case for inlining, which might otherwise create
            // some empty blocks in some cases. When parseBlock() returns with an empty
            // block, it will get repurposed instead of creating a new one. Note that this
            // logic relies on every bytecode resulting in one or more nodes, which would
            // be true anyway except for op_loop_hint, which emits a Phantom to force this
            // to be true.
            if (!m_currentBlock->isEmpty())
                addToGraph(Jump, OpInfo(m_currentIndex));
            else {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
            }
            return shouldContinueParsing;
        }

        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_graph.compilation()) {
            addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
                Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
        }

        switch (opcodeID) {

        // === Function entry opcodes ===

        case op_enter:
            // Initialize all locals to undefined.
            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                set(i, constantUndefined(), SetOnEntry);
            NEXT_OPCODE(op_enter);

        case op_to_this: {
            Node* op1 = getThis();
            if (op1->op() != ToThis) {
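                // The value profile for op_to_this records the Structure of |this|.
                // If it is monomorphic, a CheckStructure on that structure suffices
                // and the ToThis conversion can be elided.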
                ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
                ValueProfile* profile =
                    m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentIndex);
                profile->computeUpdatedPrediction(locker);
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("[bc#%u]: profile %p: ", m_currentIndex, profile);
                profile->dump(WTF::dataFile());
                dataLogF("\n");
#endif
                if (profile->m_singletonValueIsTop
                    || !profile->m_singletonValue
                    || !profile->m_singletonValue.isCell()
                    || profile->m_singletonValue.asCell()->classInfo() != Structure::info())
                    setThis(addToGraph(ToThis, op1));
                else {
                    addToGraph(
                        CheckStructure,
                        OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
                        op1);
                }
            }
            NEXT_OPCODE(op_to_this);
        }

        case op_create_this: {
            int calleeOperand = currentInstruction[2].u.operand;
            Node* callee = get(calleeOperand);
            bool alreadyEmitted = false;
            if (callee->op() == WeakJSConstant) {
                JSCell* cell = callee->weakConstant();
                ASSERT(cell->inherits(JSFunction::info()));

                JSFunction* function = jsCast<JSFunction*>(cell);
                ObjectAllocationProfile* allocationProfile = function->tryGetAllocationProfile();
                if (allocationProfile) {
                    addToGraph(AllocationProfileWatchpoint, OpInfo(function));
                    // The callee is still live up to this point.
                    addToGraph(Phantom, callee);
                    set(currentInstruction[1].u.operand,
                        addToGraph(NewObject, OpInfo(allocationProfile->structure())));
                    alreadyEmitted = true;
                }
            }
            if (!alreadyEmitted)
                set(currentInstruction[1].u.operand,
                    addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(currentInstruction[1].u.operand,
                addToGraph(NewObject,
                    OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
            NEXT_OPCODE(op_new_object);
        }

        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }

        case op_new_array_with_size: {
            int lengthOperand = currentInstruction[2].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
            set(currentInstruction[1].u.operand, addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(lengthOperand)));
            NEXT_OPCODE(op_new_array_with_size);
        }

        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            NewArrayBufferData data;
            data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
            data.numConstants = numConstants;
            data.indexingType = profile->selectIndexingType();

            // If this statement has never executed, we'll have the wrong indexing type in the profile.
            for (int i = 0; i < numConstants; ++i) {
                data.indexingType =
                    leastUpperBoundOfIndexingTypeAndValue(
                        data.indexingType,
                        m_codeBlock->constantBuffer(data.startConstant)[i]);
            }

            m_graph.m_newArrayBufferData.append(data);
            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
            NEXT_OPCODE(op_new_array_buffer);
        }

        case op_new_regexp: {
            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }

        case op_get_callee: {
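            // If profiling has only ever seen one callee here, speculate on it: emit
            // a CheckFunction and replace the load with a weak constant.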
            ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
            ValueProfile* profile = currentInstruction[2].u.profile;
            profile->computeUpdatedPrediction(locker);
            if (profile->m_singletonValueIsTop
                || !profile->m_singletonValue
                || !profile->m_singletonValue.isCell())
                set(currentInstruction[1].u.operand, get(JSStack::Callee));
            else {
                ASSERT(profile->m_singletonValue.asCell()->inherits(JSFunction::info()));
                Node* actualCallee = get(JSStack::Callee);
                addToGraph(CheckFunction, OpInfo(profile->m_singletonValue.asCell()), actualCallee);
                set(currentInstruction[1].u.operand, addToGraph(WeakJSConstant, OpInfo(profile->m_singletonValue.asCell())));
            }
            NEXT_OPCODE(op_get_callee);
        }

        // === Bitwise operations ===

        case op_bitand: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }

        case op_rshift: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            Node* result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitRShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            Node* result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitLShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            Node* result;
            // The result of a zero-extending right shift is treated as an unsigned value.
            // This means that if the top bit is set, the result is not in the int32 range,
            // and as such must be stored as a double. If the shift amount is a constant,
            // we may be able to optimize.
            if (isInt32Constant(op2)) {
                // If we know we are shifting by a non-zero amount, then since the operation
                // zero fills we know the top bit of the result must be zero, and as such the
                // result must be within the int32 range. Conversely, if this is a shift by
                // zero, then the result may be changed by the conversion to unsigned, but it
                // is not necessary to perform the shift!
                if (valueOfInt32Constant(op2) & 0x1f)
                    result = addToGraph(BitURShift, op1, op2);
                else
                    result = makeSafe(addToGraph(UInt32ToNumber, op1));
            } else {
                // Cannot optimize at this stage; shift & potentially rebox as a double.
                result = addToGraph(BitURShift, op1, op2);
                result = makeSafe(addToGraph(UInt32ToNumber, result));
            }
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_urshift);
        }

        // === Increment/Decrement opcodes ===

        case op_inc: {
            unsigned srcDst = currentInstruction[1].u.operand;
            Node* op = get(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
            NEXT_OPCODE(op_inc);
        }

        case op_dec: {
            unsigned srcDst = currentInstruction[1].u.operand;
            Node* op = get(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
            NEXT_OPCODE(op_dec);
        }

        // === Arithmetic operations ===

        case op_add: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (op1->hasNumberResult() && op2->hasNumberResult())
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
            else
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
            NEXT_OPCODE(op_add);
        }

        case op_sub: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_negate: {
            Node* op1 = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
            NEXT_OPCODE(op_negate);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
            NEXT_OPCODE(op_div);
        }

        // === Misc operations ===

#if ENABLE(DEBUG_WITH_BREAKPOINT)
        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);
#endif
        case op_mov: {
            Node* op = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, op);
            NEXT_OPCODE(op_mov);
        }

        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(currentInstruction[3].u.operand));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            Node* value = get(currentInstruction[2].u.operand);
            Node* prototype = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, prototype));
            NEXT_OPCODE(op_instanceof);
        }

        case op_is_undefined: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
            NEXT_OPCODE(op_is_undefined);
        }

        case op_is_boolean: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
            NEXT_OPCODE(op_is_boolean);
        }

        case op_is_number: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
            NEXT_OPCODE(op_is_number);
        }

        case op_is_string: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsString, value));
            NEXT_OPCODE(op_is_string);
        }

        case op_is_object: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
            NEXT_OPCODE(op_is_object);
        }

        case op_is_function: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
            NEXT_OPCODE(op_is_function);
        }

        case op_not: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }

        case op_to_primitive: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }

        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
#if CPU(X86)
            // X86 doesn't have enough registers to compile MakeRope with three arguments.
            // Rather than try to be clever, we just make MakeRope dumber on this processor.
            const unsigned maxRopeArguments = 2;
#else
            const unsigned maxRopeArguments = 3;
#endif
            OwnArrayPtr<Node*> toStringNodes = adoptArrayPtr(new Node*[numOperands]);
            for (int i = 0; i < numOperands; i++)
                toStringNodes[i] = addToGraph(ToString, get(startOperand + i));

            for (int i = 0; i < numOperands; i++)
                addToGraph(Phantom, toStringNodes[i]);

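            // Build the result as a left-leaning tree of MakeRope nodes: fill up to
            // maxRopeArguments children, then fold them into a rope that becomes the
            // first child of the next MakeRope.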
            Node* operands[AdjacencyList::Size];
            unsigned indexInOperands = 0;
            for (unsigned i = 0; i < AdjacencyList::Size; ++i)
                operands[i] = 0;
            for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
                if (indexInOperands == maxRopeArguments) {
                    operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
                    for (unsigned i = 1; i < AdjacencyList::Size; ++i)
                        operands[i] = 0;
                    indexInOperands = 1;
                }

                ASSERT(indexInOperands < AdjacencyList::Size);
                ASSERT(indexInOperands < maxRopeArguments);
                operands[indexInOperands++] = toStringNodes[operandIdx];
            }
            set(currentInstruction[1].u.operand,
                addToGraph(MakeRope, operands[0], operands[1], operands[2]));
            NEXT_OPCODE(op_strcat);
        }

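        // For the comparisons below we constant-fold at parse time when both operands
        // are constants of the right type; otherwise we emit the compare node.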
        case op_less: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber())));
                    NEXT_OPCODE(op_less);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
            NEXT_OPCODE(op_less);
        }

        case op_lesseq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber())));
                    NEXT_OPCODE(op_lesseq);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
            NEXT_OPCODE(op_lesseq);
        }

        case op_greater: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber())));
                    NEXT_OPCODE(op_greater);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
            NEXT_OPCODE(op_greater);
        }

        case op_greatereq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber())));
                    NEXT_OPCODE(op_greatereq);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
            NEXT_OPCODE(op_greatereq);
        }

        case op_eq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_eq);
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
            NEXT_OPCODE(op_eq);
        }

        case op_eq_null: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareEqConstant, value, constantNull()));
            NEXT_OPCODE(op_eq_null);
        }

        case op_stricteq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_stricteq);
            }
            if (isConstantForCompareStrictEq(op1))
                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op2, op1));
            else if (isConstantForCompareStrictEq(op2))
                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op1, op2));
            else
                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
            NEXT_OPCODE(op_stricteq);
        }

        case op_neq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_neq);
            }
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
            NEXT_OPCODE(op_neq);
        }

        case op_neq_null: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
            NEXT_OPCODE(op_neq_null);
        }

        case op_nstricteq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_nstricteq);
            }
            Node* invertedResult;
            if (isConstantForCompareStrictEq(op1))
                invertedResult = addToGraph(CompareStrictEqConstant, op2, op1);
            else if (isConstantForCompareStrictEq(op2))
                invertedResult = addToGraph(CompareStrictEqConstant, op1, op2);
            else
                invertedResult = addToGraph(CompareStrictEq, op1, op2);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, invertedResult));
            NEXT_OPCODE(op_nstricteq);
        }

        // === Property access operations ===

        case op_get_by_val: {
            SpeculatedType prediction = getPrediction();

            Node* base = get(currentInstruction[2].u.operand);
            ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
            Node* property = get(currentInstruction[3].u.operand);
            Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
            set(currentInstruction[1].u.operand, getByVal);

            NEXT_OPCODE(op_get_by_val);
        }

        case op_put_by_val: {
            Node* base = get(currentInstruction[1].u.operand);

            ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);

            Node* property = get(currentInstruction[2].u.operand);
            Node* value = get(currentInstruction[3].u.operand);

            addVarArgChild(base);
            addVarArgChild(property);
            addVarArgChild(value);
            addVarArgChild(0); // Leave room for property storage.
            addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));

            NEXT_OPCODE(op_put_by_val);
        }

        case op_get_by_id:
        case op_get_by_id_out_of_line:
        case op_get_array_length: {
            SpeculatedType prediction = getPrediction();

            Node* base = get(currentInstruction[2].u.operand);
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];

            StringImpl* uid = m_graph.identifiers()[identifierNumber];
            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock, m_currentIndex, uid);

            handleGetById(
                currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);

            NEXT_OPCODE(op_get_by_id);
        }
        case op_put_by_id:
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line: {
            Node* value = get(currentInstruction[3].u.operand);
            Node* base = get(currentInstruction[1].u.operand);
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            bool direct = currentInstruction[8].u.operand;

            PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock,
                m_currentIndex,
                m_graph.identifiers()[identifierNumber]);
            bool canCountAsInlined = true;
            if (!putByIdStatus.isSet()) {
                addToGraph(ForceOSRExit);
                canCountAsInlined = false;
            }

            bool hasExitSite =
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache);

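            // A simple replace writes to an already-present property: check the
            // structure and store straight to the known offset. A simple transition
            // additionally changes the structure and may need to grow the out-of-line
            // storage.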
            if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
                handlePutByOffset(base, identifierNumber, putByIdStatus.offset(), value);
            } else if (
                !hasExitSite
                && putByIdStatus.isSimpleTransition()
                && (!putByIdStatus.structureChain()
                    || putByIdStatus.structureChain()->isStillValid())) {

                m_graph.chains().addLazily(putByIdStatus.structureChain());

                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
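                // A normal (non-direct) put must not be intercepted anywhere on the
                // prototype chain, so emit structure checks on each prototype in the
                // transition's structure chain.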
                if (!direct) {
                    if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
                        cellConstantWithStructureCheck(
                            putByIdStatus.oldStructure()->storedPrototype().asCell());
                    }

                    for (unsigned i = 0; i < putByIdStatus.structureChain()->size(); ++i) {
                        JSValue prototype = putByIdStatus.structureChain()->at(i)->storedPrototype();
                        if (prototype.isNull())
                            continue;
                        cellConstantWithStructureCheck(prototype.asCell());
                    }
                }
                ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());

                Node* propertyStorage;
                StructureTransitionData* transitionData =
                    m_graph.addStructureTransitionData(
                        StructureTransitionData(
                            putByIdStatus.oldStructure(),
                            putByIdStatus.newStructure()));

                if (putByIdStatus.oldStructure()->outOfLineCapacity()
                    != putByIdStatus.newStructure()->outOfLineCapacity()) {

                    // If we're growing the property storage then it must be because we're
                    // storing into the out-of-line storage.
                    ASSERT(!isInlineOffset(putByIdStatus.offset()));

                    if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
                        propertyStorage = addToGraph(
                            AllocatePropertyStorage, OpInfo(transitionData), base);
                    } else {
                        propertyStorage = addToGraph(
                            ReallocatePropertyStorage, OpInfo(transitionData),
                            base, addToGraph(GetButterfly, base));
                    }
                } else {
                    if (isInlineOffset(putByIdStatus.offset()))
                        propertyStorage = base;
                    else
                        propertyStorage = addToGraph(GetButterfly, base);
                }

                addToGraph(PutStructure, OpInfo(transitionData), base);

                addToGraph(
                    PutByOffset,
                    OpInfo(m_graph.m_storageAccessData.size()),
                    propertyStorage,
                    base,
                    value);

                StorageAccessData storageAccessData;
                storageAccessData.offset = putByIdStatus.offset();
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else {
                if (direct)
                    addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
                else
                    addToGraph(PutById, OpInfo(identifierNumber), base, value);
                canCountAsInlined = false;
            }

            if (canCountAsInlined && m_graph.compilation())
                m_graph.compilation()->noticeInlinedPutById();

            NEXT_OPCODE(op_put_by_id);
        }

        case op_init_global_const_nop: {
            NEXT_OPCODE(op_init_global_const_nop);
        }

        case op_init_global_const: {
            Node* value = get(currentInstruction[2].u.operand);
            addToGraph(
                PutGlobalVar,
                OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
                value);
            NEXT_OPCODE(op_init_global_const);
        }

        // === Block terminators. ===

        case op_jmp: {
            unsigned relativeOffset = currentInstruction[1].u.operand;
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_jmp);
        }

        case op_jtrue: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* condition = get(currentInstruction[1].u.operand);
            if (canFold(condition)) {
                TriState state = valueOfJSConstant(condition).pureToBoolean();
                if (state == TrueTriState) {
                    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                    LAST_OPCODE(op_jtrue);
                } else if (state == FalseTriState) {
                    // Emit a placeholder for this bytecode operation but otherwise
2527                     // just fall through.
2528                     addToGraph(Phantom);
2529                     NEXT_OPCODE(op_jtrue);
2530                 }
2531             }
2532             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
2533             LAST_OPCODE(op_jtrue);
2534         }
2535
2536         case op_jfalse: {
2537             unsigned relativeOffset = currentInstruction[2].u.operand;
2538             Node* condition = get(currentInstruction[1].u.operand);
2539             if (canFold(condition)) {
2540                 TriState state = valueOfJSConstant(condition).pureToBoolean();
2541                 if (state == FalseTriState) {
2542                     addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2543                     LAST_OPCODE(op_jfalse);
2544                 } else if (state == TrueTriState) {
2545                     // Emit a placeholder for this bytecode operation but otherwise
2546                     // just fall through.
2547                     addToGraph(Phantom);
2548                     NEXT_OPCODE(op_jfalse);
2549                 }
2550             }
2551             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
2552             LAST_OPCODE(op_jfalse);
2553         }
2554
2555         case op_jeq_null: {
2556             unsigned relativeOffset = currentInstruction[2].u.operand;
2557             Node* value = get(currentInstruction[1].u.operand);
2558             Node* condition = addToGraph(CompareEqConstant, value, constantNull());
2559             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
2560             LAST_OPCODE(op_jeq_null);
2561         }
2562
2563         case op_jneq_null: {
2564             unsigned relativeOffset = currentInstruction[2].u.operand;
2565             Node* value = get(currentInstruction[1].u.operand);
2566             Node* condition = addToGraph(CompareEqConstant, value, constantNull());
2567             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
2568             LAST_OPCODE(op_jneq_null);
2569         }
2570
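             // The next eight compare-and-jump opcodes follow one pattern: if both
             // operands are numeric constants, fold the comparison at parse time into
             // an unconditional Jump or a Phantom placeholder; otherwise emit the
             // compare node and a Branch. A NaN operand makes every comparison false,
             // which lands in whichever arm matches the opcode's semantics (fall
             // through for the jless-style opcodes, jump for the inverted
             // jnless-style ones).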
2571         case op_jless: {
2572             unsigned relativeOffset = currentInstruction[3].u.operand;
2573             Node* op1 = get(currentInstruction[1].u.operand);
2574             Node* op2 = get(currentInstruction[2].u.operand);
2575             if (canFold(op1) && canFold(op2)) {
2576                 JSValue aValue = valueOfJSConstant(op1);
2577                 JSValue bValue = valueOfJSConstant(op2);
2578                 if (aValue.isNumber() && bValue.isNumber()) {
2579                     double a = aValue.asNumber();
2580                     double b = bValue.asNumber();
2581                     if (a < b) {
2582                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2583                         LAST_OPCODE(op_jless);
2584                     } else {
2585                         // Emit a placeholder for this bytecode operation but otherwise
2586                         // just fall through.
2587                         addToGraph(Phantom);
2588                         NEXT_OPCODE(op_jless);
2589                     }
2590                 }
2591             }
2592             Node* condition = addToGraph(CompareLess, op1, op2);
2593             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
2594             LAST_OPCODE(op_jless);
2595         }
2596
2597         case op_jlesseq: {
2598             unsigned relativeOffset = currentInstruction[3].u.operand;
2599             Node* op1 = get(currentInstruction[1].u.operand);
2600             Node* op2 = get(currentInstruction[2].u.operand);
2601             if (canFold(op1) && canFold(op2)) {
2602                 JSValue aValue = valueOfJSConstant(op1);
2603                 JSValue bValue = valueOfJSConstant(op2);
2604                 if (aValue.isNumber() && bValue.isNumber()) {
2605                     double a = aValue.asNumber();
2606                     double b = bValue.asNumber();
2607                     if (a <= b) {
2608                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2609                         LAST_OPCODE(op_jlesseq);
2610                     } else {
2611                         // Emit a placeholder for this bytecode operation but otherwise
2612                         // just fall through.
2613                         addToGraph(Phantom);
2614                         NEXT_OPCODE(op_jlesseq);
2615                     }
2616                 }
2617             }
2618             Node* condition = addToGraph(CompareLessEq, op1, op2);
2619             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
2620             LAST_OPCODE(op_jlesseq);
2621         }
2622
2623         case op_jgreater: {
2624             unsigned relativeOffset = currentInstruction[3].u.operand;
2625             Node* op1 = get(currentInstruction[1].u.operand);
2626             Node* op2 = get(currentInstruction[2].u.operand);
2627             if (canFold(op1) && canFold(op2)) {
2628                 JSValue aValue = valueOfJSConstant(op1);
2629                 JSValue bValue = valueOfJSConstant(op2);
2630                 if (aValue.isNumber() && bValue.isNumber()) {
2631                     double a = aValue.asNumber();
2632                     double b = bValue.asNumber();
2633                     if (a > b) {
2634                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2635                         LAST_OPCODE(op_jgreater);
2636                     } else {
2637                         // Emit a placeholder for this bytecode operation but otherwise
2638                         // just fall through.
2639                         addToGraph(Phantom);
2640                         NEXT_OPCODE(op_jgreater);
2641                     }
2642                 }
2643             }
2644             Node* condition = addToGraph(CompareGreater, op1, op2);
2645             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
2646             LAST_OPCODE(op_jgreater);
2647         }
2648
2649         case op_jgreatereq: {
2650             unsigned relativeOffset = currentInstruction[3].u.operand;
2651             Node* op1 = get(currentInstruction[1].u.operand);
2652             Node* op2 = get(currentInstruction[2].u.operand);
2653             if (canFold(op1) && canFold(op2)) {
2654                 JSValue aValue = valueOfJSConstant(op1);
2655                 JSValue bValue = valueOfJSConstant(op2);
2656                 if (aValue.isNumber() && bValue.isNumber()) {
2657                     double a = aValue.asNumber();
2658                     double b = bValue.asNumber();
2659                     if (a >= b) {
2660                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2661                         LAST_OPCODE(op_jgreatereq);
2662                     } else {
2663                         // Emit a placeholder for this bytecode operation but otherwise
2664                         // just fall through.
2665                         addToGraph(Phantom);
2666                         NEXT_OPCODE(op_jgreatereq);
2667                     }
2668                 }
2669             }
2670             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2671             addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
2672             LAST_OPCODE(op_jgreatereq);
2673         }
2674
2675         case op_jnless: {
2676             unsigned relativeOffset = currentInstruction[3].u.operand;
2677             Node* op1 = get(currentInstruction[1].u.operand);
2678             Node* op2 = get(currentInstruction[2].u.operand);
2679             if (canFold(op1) && canFold(op2)) {
2680                 JSValue aValue = valueOfJSConstant(op1);
2681                 JSValue bValue = valueOfJSConstant(op2);
2682                 if (aValue.isNumber() && bValue.isNumber()) {
2683                     double a = aValue.asNumber();
2684                     double b = bValue.asNumber();
2685                     if (a < b) {
2686                         // Emit a placeholder for this bytecode operation but otherwise
2687                         // just fall through.
2688                         addToGraph(Phantom);
2689                         NEXT_OPCODE(op_jnless);
2690                     } else {
2691                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2692                         LAST_OPCODE(op_jnless);
2693                     }
2694                 }
2695             }
2696             Node* condition = addToGraph(CompareLess, op1, op2);
2697             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
2698             LAST_OPCODE(op_jnless);
2699         }
2700
2701         case op_jnlesseq: {
2702             unsigned relativeOffset = currentInstruction[3].u.operand;
2703             Node* op1 = get(currentInstruction[1].u.operand);
2704             Node* op2 = get(currentInstruction[2].u.operand);
2705             if (canFold(op1) && canFold(op2)) {
2706                 JSValue aValue = valueOfJSConstant(op1);
2707                 JSValue bValue = valueOfJSConstant(op2);
2708                 if (aValue.isNumber() && bValue.isNumber()) {
2709                     double a = aValue.asNumber();
2710                     double b = bValue.asNumber();
2711                     if (a <= b) {
2712                         // Emit a placeholder for this bytecode operation but otherwise
2713                         // just fall through.
2714                         addToGraph(Phantom);
2715                         NEXT_OPCODE(op_jnlesseq);
2716                     } else {
2717                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2718                         LAST_OPCODE(op_jnlesseq);
2719                     }
2720                 }
2721             }
2722             Node* condition = addToGraph(CompareLessEq, op1, op2);
2723             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
2724             LAST_OPCODE(op_jnlesseq);
2725         }
2726
2727         case op_jngreater: {
2728             unsigned relativeOffset = currentInstruction[3].u.operand;
2729             Node* op1 = get(currentInstruction[1].u.operand);
2730             Node* op2 = get(currentInstruction[2].u.operand);
2731             if (canFold(op1) && canFold(op2)) {
2732                 JSValue aValue = valueOfJSConstant(op1);
2733                 JSValue bValue = valueOfJSConstant(op2);
2734                 if (aValue.isNumber() && bValue.isNumber()) {
2735                     double a = aValue.asNumber();
2736                     double b = bValue.asNumber();
2737                     if (a > b) {
2738                         // Emit a placeholder for this bytecode operation but otherwise
2739                         // just fall through.
2740                         addToGraph(Phantom);
2741                         NEXT_OPCODE(op_jngreater);
2742                     } else {
2743                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2744                         LAST_OPCODE(op_jngreater);
2745                     }
2746                 }
2747             }
2748             Node* condition = addToGraph(CompareGreater, op1, op2);
2749             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
2750             LAST_OPCODE(op_jngreater);
2751         }
2752
2753         case op_jngreatereq: {
2754             unsigned relativeOffset = currentInstruction[3].u.operand;
2755             Node* op1 = get(currentInstruction[1].u.operand);
2756             Node* op2 = get(currentInstruction[2].u.operand);
2757             if (canFold(op1) && canFold(op2)) {
2758                 JSValue aValue = valueOfJSConstant(op1);
2759                 JSValue bValue = valueOfJSConstant(op2);
2760                 if (aValue.isNumber() && bValue.isNumber()) {
2761                     double a = aValue.asNumber();
2762                     double b = bValue.asNumber();
2763                     if (a >= b) {
2764                         // Emit a placeholder for this bytecode operation but otherwise
2765                         // just fall through.
2766                         addToGraph(Phantom);
2767                         NEXT_OPCODE(op_jngreatereq);
2768                     } else {
2769                         addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
2770                         LAST_OPCODE(op_jngreatereq);
2771                     }
2772                 }
2773             }
2774             Node* condition = addToGraph(CompareGreaterEq, op1, op2);
2775             addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
2776             LAST_OPCODE(op_jngreatereq);
2777         }
2778             
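             // Translate the baseline's jump table into SwitchData (here and in
             // op_switch_char below). A branch offset of zero marks an unused table
             // slot, and cases that would land on the fall-through target are dropped
             // since the Switch node's fallThrough already covers them.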
2779         case op_switch_imm: {
2780             SwitchData data;
2781             data.kind = SwitchImm;
2782             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
2783             data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2784             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
2785             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
2786                 if (!table.branchOffsets[i])
2787                     continue;
2788                 unsigned target = m_currentIndex + table.branchOffsets[i];
2789                 if (target == data.fallThroughBytecodeIndex())
2790                     continue;
2791                 data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(table.min + i), target));
2792             }
2793             m_graph.m_switchData.append(data);
2794             addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(currentInstruction[3].u.operand));
2795             LAST_OPCODE(op_switch_imm);
2796         }
2797             
2798         case op_switch_char: {
2799             SwitchData data;
2800             data.kind = SwitchChar;
2801             data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
2802             data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2803             SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
2804             for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
2805                 if (!table.branchOffsets[i])
2806                     continue;
2807                 unsigned target = m_currentIndex + table.branchOffsets[i];
2808                 if (target == data.fallThroughBytecodeIndex())
2809                     continue;
2810                 data.cases.append(
2811                     SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
2812             }
2813             m_graph.m_switchData.append(data);
2814             addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(currentInstruction[3].u.operand));
2815             LAST_OPCODE(op_switch_char);
2816         }
2817
2818         case op_switch_string: {
2819             SwitchData data;
2820             data.kind = SwitchString;
2821             data.switchTableIndex = currentInstruction[1].u.operand;
2822             data.setFallThroughBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
2823             StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
2824             StringJumpTable::StringOffsetTable::iterator iter;
2825             StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
2826             for (iter = table.offsetTable.begin(); iter != end; ++iter) {
2827                 unsigned target = m_currentIndex + iter->value.branchOffset;
2828                 if (target == data.fallThroughBytecodeIndex())
2829                     continue;
2830                 data.cases.append(
2831                     SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
2832             }
2833             m_graph.m_switchData.append(data);
2834             addToGraph(Switch, OpInfo(&m_graph.m_switchData.last()), get(currentInstruction[3].u.operand));
2835             LAST_OPCODE(op_switch_string);
2836         }
2837
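             // A return from an inlined callee emits no Return node: the result is
             // written directly to the caller's return-value register. If the return
             // is not at the very end of the callee's bytecode, we also plant a Jump
             // to be linked to the continuation block later (the early-return case).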
2838         case op_ret:
2839             flushArgumentsAndCapturedVariables();
2840             if (inlineCallFrame()) {
2841                 ASSERT(m_inlineStackTop->m_returnValue != InvalidVirtualRegister);
2842                 setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
2843                 m_inlineStackTop->m_didReturn = true;
2844                 if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
2845                     // If we're returning from the first block, then we're done parsing.
2846                     ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
2847                     shouldContinueParsing = false;
2848                     LAST_OPCODE(op_ret);
2849                 } else {
2850                     // If inlining created blocks, and we're doing a return, then we need some
2851                     // special linking.
2852                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
2853                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
2854                 }
2855                 if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
2856                     ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
2857                     addToGraph(Jump, OpInfo(0));
2858                     m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
2859                     m_inlineStackTop->m_didEarlyReturn = true;
2860                 }
2861                 LAST_OPCODE(op_ret);
2862             }
2863             addToGraph(Return, get(currentInstruction[1].u.operand));
2864             LAST_OPCODE(op_ret);
2865             
2866         case op_end:
2867             flushArgumentsAndCapturedVariables();
2868             ASSERT(!inlineCallFrame());
2869             addToGraph(Return, get(currentInstruction[1].u.operand));
2870             LAST_OPCODE(op_end);
2871
2872         case op_throw:
2873             addToGraph(Throw, get(currentInstruction[1].u.operand));
2874             flushAllArgumentsAndCapturedVariablesInInlineStack();
2875             addToGraph(Unreachable);
2876             LAST_OPCODE(op_throw);
2877             
2878         case op_throw_static_error:
2879             addToGraph(ThrowReferenceError);
2880             flushAllArgumentsAndCapturedVariablesInInlineStack();
2881             addToGraph(Unreachable);
2882             LAST_OPCODE(op_throw_static_error);
2883             
2884         case op_call:
2885             handleCall(currentInstruction, Call, CodeForCall);
2886             NEXT_OPCODE(op_call);
2887             
2888         case op_construct:
2889             handleCall(currentInstruction, Construct, CodeForConstruct);
2890             NEXT_OPCODE(op_construct);
2891             
2892         case op_call_varargs: {
2893             ASSERT(inlineCallFrame());
2894             ASSERT(currentInstruction[4].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
2895             ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
2896             // It would be cool to funnel this into handleCall() so that it can handle
2897             // inlining. But currently that won't be profitable anyway, since none of the
2898             // uses of call_varargs will be inlineable. So we set this up manually and
2899             // without inline/intrinsic detection.
2900             
2901             SpeculatedType prediction = getPrediction();
2902             
2903             addToGraph(CheckArgumentsNotCreated);
2904             
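                 // m_parameterSlots tracks the largest outgoing-argument area that any
                 // call in this compilation needs; grow it so the machine frame can
                 // hold this call's header plus arguments.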
2905             unsigned argCount = inlineCallFrame()->arguments.size();
2906             if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
2907                 m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
2908             
2909             addVarArgChild(get(currentInstruction[2].u.operand)); // callee
2910             addVarArgChild(get(currentInstruction[3].u.operand)); // this
2911             for (unsigned argument = 1; argument < argCount; ++argument)
2912                 addVarArgChild(get(argumentToOperand(argument)));
2913             
2914             set(currentInstruction[1].u.operand,
2915                 addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction)));
2916             
2917             NEXT_OPCODE(op_call_varargs);
2918         }
2919             
2920         case op_jneq_ptr:
2921             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
2922             // support simmer for a while before making it more general, since it's
2923             // gnarly enough as it is.
2924             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
2925             addToGraph(
2926                 CheckFunction,
2927                 OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
2928                 get(currentInstruction[1].u.operand));
2929             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
2930             LAST_OPCODE(op_jneq_ptr);
2931
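             // Scope resolution is static here: global resolutions fold to the global
             // object constant, while closure resolutions walk 'depth' links up the
             // scope chain.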
2932         case op_resolve_scope: {
2933             unsigned dst = currentInstruction[1].u.operand;
2934             ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
2935             unsigned depth = currentInstruction[4].u.operand;
2936
2937             // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
2938             if (needsVarInjectionChecks(resolveType))
2939                 addToGraph(VarInjectionWatchpoint);
2940
2941             switch (resolveType) {
2942             case GlobalProperty:
2943             case GlobalVar:
2944             case GlobalPropertyWithVarInjectionChecks:
2945             case GlobalVarWithVarInjectionChecks:
2946                 set(dst, cellConstant(m_inlineStackTop->m_codeBlock->globalObject()));
2947                 break;
2948             case ClosureVar:
2949             case ClosureVarWithVarInjectionChecks:
2950                 set(dst, getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
2951                 break;
2952             case Dynamic:
2953                 RELEASE_ASSERT_NOT_REACHED();
2954                 break;
2955             }
2956             NEXT_OPCODE(op_resolve_scope);
2957         }
2958
2959         case op_get_from_scope: {
2960             unsigned dst = currentInstruction[1].u.operand;
2961             unsigned scope = currentInstruction[2].u.operand;
2962             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
2963             StringImpl* uid = m_graph.identifiers()[identifierNumber];
2964             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
2965
2966             Structure* structure;
2967             uintptr_t operand;
2968             {
2969                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
2970                 structure = currentInstruction[5].u.structure.get();
2971                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
2972             }
2973
2974             SpeculatedType prediction = getPrediction();
2975             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
2976
2977             switch (resolveType) {
2978             case GlobalProperty:
2979             case GlobalPropertyWithVarInjectionChecks: {
2980                 GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
2981                 if (status.takesSlowPath()) {
2982                     set(dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(scope)));
2983                     break;
2984                 }
2985                 Node* base = cellConstantWithStructureCheck(globalObject, status.structureSet().singletonStructure());
2986                 if (JSValue specificValue = status.specificValue())
2987                     set(dst, cellConstant(specificValue.asCell()));
2988                 else
2989                     set(dst, handleGetByOffset(prediction, base, identifierNumber, operand));
2990                 break;
2991             }
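                 // If the global variable's watchpoint set is still valid, fold the
                 // load to the variable's current value and register a watchpoint that
                 // invalidates this code if the variable is ever written.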
2992             case GlobalVar:
2993             case GlobalVarWithVarInjectionChecks: {
2994                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
2995                 if (!entry.couldBeWatched() || !m_graph.watchpoints().isStillValid(entry.watchpointSet())) {
2996                     set(dst, addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
2997                     break;
2998                 }
2999
3000                 addToGraph(GlobalVarWatchpoint, OpInfo(operand), OpInfo(identifierNumber));
3001                 JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
3002                 set(dst, cellConstant(specificValue.asCell()));
3003                 break;
3004             }
3005             case ClosureVar:
3006             case ClosureVarWithVarInjectionChecks:
3007                 set(dst, 
3008                     addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), 
3009                         addToGraph(GetClosureRegisters, get(scope))));
3010                 break;
3011             case Dynamic:
3012                 RELEASE_ASSERT_NOT_REACHED();
3013                 break;
3014             }
3015             NEXT_OPCODE(op_get_from_scope);
3016         }
3017
3018         case op_put_to_scope: {
3019             unsigned scope = currentInstruction[1].u.operand;
3020             unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
3021             unsigned value = currentInstruction[3].u.operand;
3022             ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
3023             StringImpl* uid = m_graph.identifiers()[identifierNumber];
3024
3025             Structure* structure;
3026             uintptr_t operand;
3027             {
3028                 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
3029                 structure = currentInstruction[5].u.structure.get();
3030                 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
3031             }
3032
3033             JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
3034
3035             switch (resolveType) {
3036             case GlobalProperty:
3037             case GlobalPropertyWithVarInjectionChecks: {
3038                 PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
3039                 if (!status.isSimpleReplace()) {
3040                     addToGraph(PutById, OpInfo(identifierNumber), get(scope), get(value));
3041                     break;
3042                 }
3043                 Node* base = cellConstantWithStructureCheck(globalObject, status.oldStructure());
3044                 handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(value));
3045                 break;
3046             }
3047             case GlobalVar:
3048             case GlobalVarWithVarInjectionChecks: {
3049                 SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
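                     // Writes to a watched variable fire its watchpoint set in the
                     // baseline JIT, so by the time we compile a profiled put the set
                     // should already be unwatchable or invalidated.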
3050                 ASSERT(!entry.couldBeWatched() || !m_graph.watchpoints().isStillValid(entry.watchpointSet()));
3051                 addToGraph(PutGlobalVar, OpInfo(operand), get(value));
3052                 break;
3053             }
3054             case ClosureVar:
3055             case ClosureVarWithVarInjectionChecks: {
3056                 Node* scopeNode = get(scope);
3057                 Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
3058                 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(value));
3059                 break;
3060             }
3061             case Dynamic:
3062                 RELEASE_ASSERT_NOT_REACHED();
3063                 break;
3064             }
3065             NEXT_OPCODE(op_put_to_scope);
3066         }
3067
3068         case op_loop_hint: {
3069             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
3070             // OSR can only happen at basic block boundaries. Assert that these two statements
3071             // are compatible.
3072             RELEASE_ASSERT(m_currentIndex == blockBegin);
3073             
3074             // We never do OSR into an inlined code block. That cannot happen, since OSR
3075             // looks up the code block that is the replacement for the baseline JIT code
3076             // block. Hence, machine code block = true code block = not inline code block.
3077             if (!m_inlineStackTop->m_caller)
3078                 m_currentBlock->isOSRTarget = true;
3079
3080             if (m_vm->watchdog.isEnabled())
3081                 addToGraph(CheckWatchdogTimer);
3082             else {
3083                 // Emit a phantom node to ensure that there is a placeholder
3084                 // node for this bytecode op.
3085                 addToGraph(Phantom);
3086             }
3087             
3088             NEXT_OPCODE(op_loop_hint);
3089         }
3090             
3091         case op_init_lazy_reg: {
3092             set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
3093             NEXT_OPCODE(op_init_lazy_reg);
3094         }
3095             
3096         case op_create_activation: {
3097             set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
3098             NEXT_OPCODE(op_create_activation);
3099         }
3100             
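             // The new Arguments object is stored both in the arguments register and
             // in its unmodified shadow register, so ops that read the shadow (such as
             // op_tear_off_arguments below) still find it if the visible register is
             // reassigned.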
3101         case op_create_arguments: {
3102             m_graph.m_hasArguments = true;
3103             Node* createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
3104             set(currentInstruction[1].u.operand, createArguments);
3105             set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
3106             NEXT_OPCODE(op_create_arguments);
3107         }
3108             
3109         case op_tear_off_activation: {
3110             addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
3111             NEXT_OPCODE(op_tear_off_activation);
3112         }
3113
3114         case op_tear_off_arguments: {
3115             m_graph.m_hasArguments = true;
3116             addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)), get(currentInstruction[2].u.operand));
3117             NEXT_OPCODE(op_tear_off_arguments);
3118         }
3119             
3120         case op_get_arguments_length: {
3121             m_graph.m_hasArguments = true;
3122             set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
3123             NEXT_OPCODE(op_get_arguments_length);
3124         }
3125             
3126         case op_get_argument_by_val: {
3127             m_graph.m_hasArguments = true;
3128             set(currentInstruction[1].u.operand,
3129                 addToGraph(
3130                     GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
3131                     get(currentInstruction[3].u.operand)));
3132             NEXT_OPCODE(op_get_argument_by_val);
3133         }
3134             
3135         case op_new_func: {
3136             if (!currentInstruction[3].u.operand) {
3137                 set(currentInstruction[1].u.operand,
3138                     addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
3139             } else {
3140                 set(currentInstruction[1].u.operand,
3141                     addToGraph(
3142                         NewFunction,
3143                         OpInfo(currentInstruction[2].u.operand),
3144                         get(currentInstruction[1].u.operand)));
3145             }
3146             NEXT_OPCODE(op_new_func);
3147         }
3148             
3149         case op_new_func_exp: {
3150             set(currentInstruction[1].u.operand,
3151                 addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
3152             NEXT_OPCODE(op_new_func_exp);
3153         }
3154
3155         case op_typeof: {
3156             set(currentInstruction[1].u.operand,
3157                 addToGraph(TypeOf, get(currentInstruction[2].u.operand)));
3158             NEXT_OPCODE(op_typeof);
3159         }
3160
3161         case op_to_number: {
3162             set(currentInstruction[1].u.operand,
3163                 addToGraph(Identity, Edge(get(currentInstruction[2].u.operand), NumberUse)));
3164             NEXT_OPCODE(op_to_number);
3165         }
3166             
3167         case op_in: {
3168             set(currentInstruction[1].u.operand,
3169                 addToGraph(In, get(currentInstruction[2].u.operand), get(currentInstruction[3].u.operand)));
3170             NEXT_OPCODE(op_in);
3171         }
3172
3173         default:
3174             // Parse failed! This should not happen because the capabilities checker
3175             // should have caught it.
3176             RELEASE_ASSERT_NOT_REACHED();
3177             return false;
3178         }
3179     }
3180 }
3181
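     // During parsing, Jump/Branch/Switch record raw bytecode offsets as their
     // targets. Linking resolves those offsets into BasicBlock pointers by looking
     // them up in possibleTargets.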
3182 void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
3183 {
3184     ASSERT(!block->isLinked);
3185     ASSERT(!block->isEmpty());
3186     Node* node = block->last();
3187     ASSERT(node->isTerminal());
3188     
3189     switch (node->op()) {
3190     case Jump:
3191         node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
3192 #if DFG_ENABLE(DEBUG_VERBOSE)
3193         dataLogF("Linked basic block %p to %p, #%u.\n", block, node->takenBlock(), node->takenBlock()->index);
3194 #endif
3195         break;
3196         
3197     case Branch:
3198         node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
3199         node->setNotTakenBlock(blockForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
3200 #if DFG_ENABLE(DEBUG_VERBOSE)
3201         dataLogF("Linked basic block %p to %p, #%u and %p, #%u.\n", block, node->takenBlock(), node->takenBlock()->index, node->notTakenBlock(), node->notTakenBlock()->index);
3202 #endif
3203         break;
3204         
3205     case Switch:
3206         for (unsigned i = node->switchData()->cases.size(); i--;)
3207             node->switchData()->cases[i].target = blockForBytecodeOffset(possibleTargets, node->switchData()->cases[i].targetBytecodeIndex());
3208         node->switchData()->fallThrough = blockForBytecodeOffset(possibleTargets, node->switchData()->fallThroughBytecodeIndex());
3209         break;
3210         
3211     default:
3212 #if DFG_ENABLE(DEBUG_VERBOSE)
3213         dataLogF("Marking basic block %p as linked.\n", block);
3214 #endif
3215         break;
3216     }
3217     
3218 #if !ASSERT_DISABLED
3219     block->isLinked = true;
3220 #endif
3221 }
3222
3223 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
3224 {
3225     for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
3226         if (unlinkedBlocks[i].m_needsNormalLinking) {
3227             linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
3228             unlinkedBlocks[i].m_needsNormalLinking = false;
3229         }
3230     }
3231 }
3232
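     // Lazily build reverse maps from identifier StringImpl* and encoded constant
     // JSValue to their indices in the machine code block; these are only needed
     // once inlining starts. The empty JSValue cannot be used as a hash key, so its
     // index is tracked separately in m_emptyJSValueIndex.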
3233 void ByteCodeParser::buildOperandMapsIfNecessary()
3234 {
3235     if (m_haveBuiltOperandMaps)
3236         return;
3237     
3238     for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
3239         m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
3240     for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
3241         JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
3242         if (!value)
3243             m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
3244         else
3245             m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
3246     }
3247     
3248     m_haveBuiltOperandMaps = true;
3249 }
3250
3251 ByteCodeParser::InlineStackEntry::InlineStackEntry(
3252     ByteCodeParser* byteCodeParser,
3253     CodeBlock* codeBlock,
3254     CodeBlock* profiledBlock,
3255     BasicBlock* callsiteBlockHead,
3256     JSFunction* callee, // Null if this is a closure call.
3257     VirtualRegister returnValueVR,
3258     VirtualRegister inlineCallFrameStart,
3259     int argumentCountIncludingThis,
3260     CodeSpecializationKind kind)
3261     : m_byteCodeParser(byteCodeParser)
3262     , m_codeBlock(codeBlock)
3263     , m_profiledBlock(profiledBlock)
3264     , m_callsiteBlockHead(callsiteBlockHead)
3265     , m_returnValue(returnValueVR)
3266     , m_didReturn(false)
3267     , m_didEarlyReturn(false)
3268     , m_caller(byteCodeParser->m_inlineStackTop)
3269 {
3270     {
3271         ConcurrentJITLocker locker(m_profiledBlock->m_lock);
3272         m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
3273         m_exitProfile.initialize(locker, profiledBlock->exitProfile());
3274     }
3275     
3276     m_argumentPositions.resize(argumentCountIncludingThis);
3277     for (int i = 0; i < argumentCountIncludingThis; ++i) {
3278         byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
3279         ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
3280         m_argumentPositions[i] = argumentPosition;
3281     }
3282     
3283     // Track the code-block-global exit sites.
3284     if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
3285         byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
3286             codeBlock->ownerExecutable());
3287     }
3288         
3289     if (m_caller) {
3290         // Inline case.
3291         ASSERT(codeBlock != byteCodeParser->m_codeBlock);
3292         ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
3293         ASSERT(callsiteBlockHead);
3294         
3295         InlineCallFrame inlineCallFrame;
3296         initializeLazyWriteBarrierForInlineCallFrameExecutable(
3297             byteCodeParser->m_graph.m_plan.writeBarriers,
3298             inlineCallFrame.executable,
3299             byteCodeParser->m_codeBlock,
3300             byteCodeParser->m_codeBlock->inlineCallFrames().size(),
3301             byteCodeParser->m_codeBlock->ownerExecutable(), 
3302             codeBlock->ownerExecutable());
3303         inlineCallFrame.stackOffset = inlineCallFrameStart + JSStack::CallFrameHeaderSize;
3304         if (callee) {
3305             initializeLazyWriteBarrierForInlineCallFrameCallee(
3306                 byteCodeParser->m_graph.m_plan.writeBarriers,
3307                 inlineCallFrame.callee,
3308                 byteCodeParser->m_codeBlock,
3309                 byteCodeParser->m_codeBlock->inlineCallFrames().size(),
3310                 byteCodeParser->m_codeBlock->ownerExecutable(), 
3311                 callee);
3312         }
3313         inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
3314         inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries yet.
3315         inlineCallFrame.isCall = isCall(kind);
3316         
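             // Captured variables are tracked in machine-frame coordinates: inherit
             // the caller's set (or compute the machine block's own captured locals),
             // then add this callee's captured arguments and locals rebased by the
             // frame's stack offset.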
3317         if (inlineCallFrame.caller.inlineCallFrame)
3318             inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
3319         else {
3320             for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
3321                 if (byteCodeParser->m_codeBlock->isCaptured(i))
3322                     inlineCallFrame.capturedVars.set(i);
3323             }
3324         }
3325
3326         for (int i = argumentCountIncludingThis; i--;) {
3327             if (codeBlock->isCaptured(argumentToOperand(i)))
3328                 inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
3329         }
3330         for (size_t i = codeBlock->m_numVars; i--;) {
3331             if (codeBlock->isCaptured(i))
3332                 inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
3333         }
3334
3335 #if DFG_ENABLE(DEBUG_VERBOSE)
3336         dataLogF("Current captured variables: ");
3337         inlineCallFrame.capturedVars.dump(WTF::dataFile());
3338         dataLogF("\n");
3339 #endif
3340         
3341         byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
3342         m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();
3343         
3344         byteCodeParser->buildOperandMapsIfNecessary();
3345         
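             // The inlined code refers to its own identifier, constant, constant-buffer
             // and switch-table indices; remap each of them into the machine code
             // block's tables, adding entries (and deduplicating) as we go.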
3346         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
3347         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
3348         m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
3349         m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
3350
3351         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
3352             StringImpl* rep = codeBlock->identifier(i).impl();
3353             BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
3354             if (result.isNewEntry)
3355                 byteCodeParser->m_graph.identifiers().addLazily(rep);
3356             m_identifierRemap[i] = result.iterator->value;
3357         }
3358         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
3359             JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
3360             if (!value) {
3361                 if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
3362                     byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
3363                     byteCodeParser->addConstant(JSValue());
3364                     byteCodeParser->m_constants.append(ConstantRecord());
3365                 }
3366                 m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
3367                 continue;
3368             }
3369             JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
3370             if (result.isNewEntry) {
3371                 byteCodeParser->addConstant(value);
3372                 byteCodeParser->m_constants.append(ConstantRecord());
3373             }
3374             m_constantRemap[i] = result.iterator->value;
3375         }
3376         for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
3377             // If we inline the same code block multiple times, we don't want to needlessly
3378             // duplicate its constant buffers.
3379             HashMap<ConstantBufferKey, unsigned>::iterator iter =
3380                 byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
3381             if (iter != byteCodeParser->m_constantBufferCache.end()) {
3382                 m_constantBufferRemap[i] = iter->value;
3383                 continue;
3384             }
3385             Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
3386             unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
3387             m_constantBufferRemap[i] = newIndex;
3388             byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
3389         }
3390         for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
3391             m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
3392             byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
3393         }
3394         m_callsiteBlockHeadNeedsLinking = true;
3395     } else {
3396         // Machine code block case.
3397         ASSERT(codeBlock == byteCodeParser->m_codeBlock);
3398         ASSERT(!callee);
3399         ASSERT(returnValueVR == InvalidVirtualRegister);
3400         ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
3401         ASSERT(!callsiteBlockHead);
3402
3403         m_inlineCallFrame = 0;
3404
3405         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
3406         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
3407         m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
3408         m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
3409         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
3410             m_identifierRemap[i] = i;
3411         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
3412             m_constantRemap[i] = i + FirstConstantRegisterIndex;
3413         for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
3414             m_constantBufferRemap[i] = i;
3415         for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
3416             m_switchRemap[i] = i;
3417         m_callsiteBlockHeadNeedsLinking = false;
3418     }
3419     
3420     for (size_t i = 0; i < m_constantRemap.size(); ++i)
3421         ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));
3422     
3423     byteCodeParser->m_inlineStackTop = this;
3424 }
3425
3426 void ByteCodeParser::parseCodeBlock()
3427 {
3428     CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
3429     
3430     if (m_graph.compilation()) {
3431         m_graph.compilation()->addProfiledBytecodes(
3432             *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
3433     }
3434     
3435     bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
3436 #if DFG_ENABLE(DEBUG_VERBOSE)
3437     shouldDumpBytecode |= true;
3438 #endif
3439     if (shouldDumpBytecode) {
3440         dataLog("Parsing ", *codeBlock);
3441         if (inlineCallFrame()) {
3442             dataLog(
3443                 " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
3444                 " ", inlineCallFrame()->caller);
3445         }
3446         dataLog(
3447             ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
3448             ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(),
3449             ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(),
3450             ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
3451         codeBlock->baselineVersion()->dumpBytecode();
3452     }
3453     
3454     Vector<unsigned, 32> jumpTargets;
3455     computePreciseJumpTargets(codeBlock, jumpTargets);
3456     if (Options::dumpBytecodeAtDFGTime()) {
3457         dataLog("Jump targets: ");
3458         CommaPrinter comma;
3459         for (unsigned i = 0; i < jumpTargets.size(); ++i)
3460             dataLog(comma, jumpTargets[i]);
3461         dataLog("\n");
3462     }
3463     
3464     for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
3465         // The maximum bytecode offset to go into the current basic block is either the next jump target or the end of the instructions.
3466         unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
3467 #if DFG_ENABLE(DEBUG_VERBOSE)
3468         dataLog(
3469             "Parsing bytecode with limit ", pointerDump(inlineCallFrame()),
3470             " bc#", limit, " at inline depth ",
3471             CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()), ".\n");
3472 #endif
3473         ASSERT(m_currentIndex < limit);
3474
3475         // Loop until we reach the current limit (i.e. next jump target).
3476         do {
3477             if (!m_currentBlock) {
3478                 // Check if we can use the last block.
3479                 if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) {
3480                     // This must be a block belonging to us.
3481                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
3482                     // Either the block is linkable or it isn't. If it's linkable then it's the last
3483                     // block in the blockLinkingTargets list. If it's not then the last block will
3484                     // have a lower bytecode index than the one we're about to give to this block.
3485                     if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) {
3486                         // Make the block linkable.
3487                         ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex);
3488                         m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock());
3489                     }
3490                     // Change its bytecode begin and continue.
3491                     m_currentBlock = m_graph.lastBlock();
3492 #if DFG_ENABLE(DEBUG_VERBOSE)
3493                     dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
3494 #endif
3495                     m_currentBlock->bytecodeBegin = m_currentIndex;
3496                 } else {
3497                     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
3498 #if DFG_ENABLE(DEBUG_VERBOSE)
3499                     dataLogF("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.numBlocks(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
3500 #endif
3501                     m_currentBlock = block.get();
3502                     // This assertion checks two things:
3503                     // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
3504                     //    horribly wrong. So, we're probably generating incorrect code.
3505                     // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
3506                     //    a peephole coalescing of this block in the if statement above. So, we're
3507                     //    generating suboptimal code and leaving more work for the CFG simplifier.
3508                     ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex);
3509                     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
3510                     m_inlineStackTop->m_blockLinkingTargets.append(block.get());
3511                     // The first block is definitely an OSR target.
3512                     if (!m_graph.numBlocks())
3513                         block->isOSRTarget = true;
3514                     m_graph.appendBlock(block);
3515                     prepareToParseBlock();
3516                 }
3517             }
3518
3519             bool shouldContinueParsing = parseBlock(limit);
3520
3521             // We should not have gone beyond the limit.
3522             ASSERT(m_currentIndex <= limit);
3523             
3524             // We should have planted a terminal, or we just gave up because
3525             // we realized that the jump target information is imprecise, or we
3526             // are at the end of an inline function, or we realized that we
3527             // should stop parsing because there was a return in the first
3528