/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "Comment.h"
#include "CompactJITCodeMap.h"
#include "DFGCodeBlocks.h"
#include "DFGCommon.h"
#include "DFGExitProfile.h"
#include "DFGMinifiedGraph.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "DFGVariableEventStream.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "Options.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpReplacementWatchpoint.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "Nodes.h"
#include "RegExpObject.h"
#include "ResolveOperation.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "Watchpoint.h"
#include <wtf/RefCountedArray.h>
#include <wtf/FastAllocBase.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/Platform.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

// Set ENABLE_BYTECODE_COMMENTS to 1 to enable recording bytecode generator
// comments for the bytecodes that it generates. This will allow
// CodeBlock::dump() to provide some contextual info about the bytecodes.
//
// The way this comment system works is as follows:
// 1. The BytecodeGenerator calls prependComment() with a constant comment
//    string in .text. The string must not be a stack or heap allocated
//    string.
// 2. When the BytecodeGenerator's emitOpcode() is called, the last
//    prepended comment is recorded with the PC of the opcode being
//    emitted. The comment is stored in the CodeBlock's m_bytecodeComments.
// 3. When CodeBlock::dump() is called, it pairs up the comments with
//    their corresponding bytecodes based on the bytecode's and the
//    comment's PC. If a matching pair is found, the comment is printed
//    after the bytecode; otherwise no comment is printed.
//
// NOTE: Enabling this consumes additional memory at runtime to store the
// comments. Since these comments are only useful for VM debugging (as opposed
// to app debugging), this feature is disabled by default, and should be
// enabled only as needed for VM development.

#define ENABLE_BYTECODE_COMMENTS 0
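
// As a rough illustration of the flow described above (the generator-side calls
// are paraphrased here rather than quoted from BytecodeGenerator):
//
//     generator.prependComment("fetch the loop counter");
//     generator.emitOpcode(op_loop_if_less); // the comment is recorded at this PC
//
// CodeBlock::dump() will then print the comment next to that bytecode.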

namespace JSC {

    class DFGCodeBlocks;
    class ExecState;
    class LLIntOffsetsExtractor;
    class RepatchBuffer;

    inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }

    static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

    class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
        WTF_MAKE_FAST_ALLOCATED;
        friend class JIT;
        friend class LLIntOffsetsExtractor;
    public:
        enum CopyParsedBlockTag { CopyParsedBlock };
    protected:
        CodeBlock(CopyParsedBlockTag, CodeBlock& other);

        CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative);

        WriteBarrier<JSGlobalObject> m_globalObject;
        Heap* m_heap;

    public:
        JS_EXPORT_PRIVATE virtual ~CodeBlock();

        int numParameters() const { return m_numParameters; }
        void setNumParameters(int newValue);
        void addParameter();

        int* addressOfNumParameters() { return &m_numParameters; }
        static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

        CodeBlock* alternative() { return m_alternative.get(); }
        PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
        void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }

        CodeSpecializationKind specializationKind()
        {
            if (m_isConstructor)
                return CodeForConstruct;
            return CodeForCall;
        }

#if ENABLE(JIT)
        CodeBlock* baselineVersion()
        {
            CodeBlock* result = replacement();
            if (!result)
                return 0; // This can happen if we're in the process of creating the baseline version.
            while (result->alternative())
                result = result->alternative();
            ASSERT(result);
            ASSERT(JITCode::isBaselineCode(result->getJITType()));
            return result;
        }
#endif

        void visitAggregate(SlotVisitor&);

        static void dumpStatistics();

        void dump(ExecState*);
        void printStructures(const Instruction*);
        void printStructure(const char* name, const Instruction*, int operand);

        bool isStrictMode() const { return m_isStrictMode; }

        inline bool isKnownNotImmediate(int index)
        {
            if (index == m_thisRegister && !m_isStrictMode)
                return true;

            if (isConstantRegisterIndex(index))
                return getConstant(index).isCell();

            return false;
        }

        ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
        {
            return index >= m_numVars;
        }

        void dumpBytecodeCommentAndNewLine(int location);
#if ENABLE(BYTECODE_COMMENTS)
        const char* commentForBytecodeOffset(unsigned bytecodeOffset);
        void dumpBytecodeComments();
#endif

        HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
        int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
        void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);

        uint32_t addResolve()
        {
            m_resolveOperations.grow(m_resolveOperations.size() + 1);
            return m_resolveOperations.size() - 1;
        }
        uint32_t addPutToBase()
        {
            m_putToBaseOperations.append(PutToBaseOperation(isStrictMode()));
            return m_putToBaseOperations.size() - 1;
        }

        ResolveOperations* resolveOperations(uint32_t i)
        {
            return &m_resolveOperations[i];
        }

        PutToBaseOperation* putToBaseOperation(uint32_t i)
        {
            return &m_putToBaseOperations[i];
        }

        size_t numberOfResolveOperations() const { return m_resolveOperations.size(); }
        size_t numberOfPutToBaseOperations() const { return m_putToBaseOperations.size(); }

#if ENABLE(JIT)

        StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
        }

        StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
        {
            return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
        }

        void resetStub(StructureStubInfo&);

        ByValInfo& getByValInfo(unsigned bytecodeIndex)
        {
            return *(binarySearch<ByValInfo, unsigned, getByValInfoBytecodeIndex>(m_byValInfos.begin(), m_byValInfos.size(), bytecodeIndex));
        }

        CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
        }

        CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
        {
            return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
        }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
        Instruction* adjustPCIfAtCallSite(Instruction*);
#endif
        unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);

#if ENABLE(JIT)
        unsigned bytecodeOffsetForCallAtIndex(unsigned index)
        {
            if (!m_rareData)
                return 1;
            Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
            if (!callIndices.size())
                return 1;
            ASSERT(index < m_rareData->m_callReturnIndexVector.size());
            return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
        }

        void unlinkCalls();

        bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }

        void linkIncomingCall(CallLinkInfo* incoming)
        {
            m_incomingCalls.push(incoming);
        }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
        void linkIncomingCall(LLIntCallLinkInfo* incoming)
        {
            m_incomingLLIntCalls.push(incoming);
        }
#endif // ENABLE(LLINT)

        void unlinkIncomingCalls();

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
        void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
        {
            m_jitCodeMap = jitCodeMap;
        }
        CompactJITCodeMap* jitCodeMap()
        {
            return m_jitCodeMap.get();
        }
#endif

#if ENABLE(DFG_JIT)
        void createDFGDataIfNecessary()
        {
            if (!!m_dfgData)
                return;

            m_dfgData = adoptPtr(new DFGData);
        }

        DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
        {
            createDFGDataIfNecessary();
            DFG::OSREntryData entry;
            entry.m_bytecodeIndex = bytecodeIndex;
            entry.m_machineCodeOffset = machineCodeOffset;
            m_dfgData->osrEntry.append(entry);
            return &m_dfgData->osrEntry.last();
        }
        unsigned numberOfDFGOSREntries() const
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->osrEntry.size();
        }
        DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
        DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
        {
            if (!m_dfgData)
                return 0;
            if (m_dfgData->osrEntry.isEmpty())
                return 0;
            DFG::OSREntryData* result = binarySearch<
                DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(
                    m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(),
                    bytecodeIndex, WTF::KeyMustNotBePresentInArray);
            if (result->m_bytecodeIndex != bytecodeIndex)
                return 0;
            return result;
        }

        unsigned appendOSRExit(const DFG::OSRExit& osrExit)
        {
            createDFGDataIfNecessary();
            unsigned result = m_dfgData->osrExit.size();
            m_dfgData->osrExit.append(osrExit);
            return result;
        }

        DFG::OSRExit& lastOSRExit()
        {
            return m_dfgData->osrExit.last();
        }

        unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
        {
            createDFGDataIfNecessary();
            unsigned result = m_dfgData->speculationRecovery.size();
            m_dfgData->speculationRecovery.append(recovery);
            return result;
        }

        unsigned appendWatchpoint(const JumpReplacementWatchpoint& watchpoint)
        {
            createDFGDataIfNecessary();
            unsigned result = m_dfgData->watchpoints.size();
            m_dfgData->watchpoints.append(watchpoint);
            return result;
        }

        unsigned numberOfOSRExits()
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->osrExit.size();
        }

        unsigned numberOfSpeculationRecoveries()
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->speculationRecovery.size();
        }

        unsigned numberOfWatchpoints()
        {
            if (!m_dfgData)
                return 0;
            return m_dfgData->watchpoints.size();
        }

        DFG::OSRExit& osrExit(unsigned index)
        {
            return m_dfgData->osrExit[index];
        }

        DFG::SpeculationRecovery& speculationRecovery(unsigned index)
        {
            return m_dfgData->speculationRecovery[index];
        }

        JumpReplacementWatchpoint& watchpoint(unsigned index)
        {
            return m_dfgData->watchpoints[index];
        }

        void appendWeakReference(JSCell* target)
        {
            createDFGDataIfNecessary();
            m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
        }

        void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
        {
            createDFGDataIfNecessary();
            m_dfgData->transitions.append(
                WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
        }

        DFG::MinifiedGraph& minifiedDFG()
        {
            createDFGDataIfNecessary();
            return m_dfgData->minifiedDFG;
        }

        DFG::VariableEventStream& variableEventStream()
        {
            createDFGDataIfNecessary();
            return m_dfgData->variableEventStream;
        }
#endif

        unsigned bytecodeOffset(Instruction* returnAddress)
        {
            ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
            return static_cast<Instruction*>(returnAddress) - instructions().begin();
        }

        void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
        bool isNumericCompareFunction() { return m_isNumericCompareFunction; }

        unsigned numberOfInstructions() const { return m_instructions.size(); }
        RefCountedArray<Instruction>& instructions() { return m_instructions; }
        const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

#if ENABLE(BYTECODE_COMMENTS)
        Vector<Comment>& bytecodeComments() { return m_bytecodeComments; }
#endif

        size_t predictedMachineCodeSize();

        bool usesOpcode(OpcodeID);

        unsigned instructionCount() { return m_instructions.size(); }

        int argumentIndexAfterCapture(size_t argument);

#if ENABLE(JIT)
        void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
        {
            m_jitCode = code;
            m_jitCodeWithArityCheck = codeWithArityCheck;
#if ENABLE(DFG_JIT)
            if (m_jitCode.jitType() == JITCode::DFGJIT) {
                createDFGDataIfNecessary();
                m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
            }
#endif
        }
        JITCode& getJITCode() { return m_jitCode; }
        MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
        JITCode::JITType getJITType() { return m_jitCode.jitType(); }
        ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
        virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex) = 0;
        virtual void jettison() = 0;
        enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
        JITCompilationResult jitCompile(ExecState* exec)
        {
            if (getJITType() != JITCode::InterpreterThunk) {
                ASSERT(getJITType() == JITCode::BaselineJIT);
                return AlreadyCompiled;
            }
#if ENABLE(JIT)
            if (jitCompileImpl(exec))
                return CompiledSuccessfully;
            return CouldNotCompile;
#else
            UNUSED_PARAM(exec);
            return CouldNotCompile;
#endif
        }
        virtual CodeBlock* replacement() = 0;

        virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
        DFG::CapabilityLevel canCompileWithDFG()
        {
            DFG::CapabilityLevel result = canCompileWithDFGInternal();
            m_canCompileWithDFGState = result;
            return result;
        }
        DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }

        bool hasOptimizedReplacement()
        {
            ASSERT(JITCode::isBaselineCode(getJITType()));
            bool result = replacement()->getJITType() > getJITType();
#if !ASSERT_DISABLED
            if (result)
                ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
            else {
                ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
                ASSERT(replacement() == this);
            }
#endif
            return result;
        }
#else
        JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
#endif

        ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

        void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
        JSGlobalData* globalData() { return m_globalData; }

        void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
        int thisRegister() const { return m_thisRegister; }

        void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
        bool needsFullScopeChain() const { return m_needsFullScopeChain; }
        void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
        bool usesEval() const { return m_usesEval; }

        void setArgumentsRegister(int argumentsRegister)
        {
            ASSERT(argumentsRegister != -1);
            m_argumentsRegister = argumentsRegister;
            ASSERT(usesArguments());
        }
        int argumentsRegister() const
        {
            ASSERT(usesArguments());
            return m_argumentsRegister;
        }
        int uncheckedArgumentsRegister()
        {
            if (!usesArguments())
                return InvalidVirtualRegister;
            return argumentsRegister();
        }
        void setActivationRegister(int activationRegister)
        {
            m_activationRegister = activationRegister;
        }
        int activationRegister() const
        {
            ASSERT(needsFullScopeChain());
            return m_activationRegister;
        }
        int uncheckedActivationRegister()
        {
            if (!needsFullScopeChain())
                return InvalidVirtualRegister;
            return activationRegister();
        }
        bool usesArguments() const { return m_argumentsRegister != -1; }

        bool needsActivation() const
        {
            return needsFullScopeChain() && codeType() != GlobalCode;
        }

        bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
        {
            if (inlineCallFrame && !operandIsArgument(operand))
                return inlineCallFrame->capturedVars.get(operand);

            if (operandIsArgument(operand))
                return usesArguments();

            // The activation object isn't in the captured region, but it's "captured"
            // in the sense that stores to its location can be observed indirectly.
            if (needsActivation() && operand == activationRegister())
                return true;

            // Ditto for the arguments object.
            if (usesArguments() && operand == argumentsRegister())
                return true;

            // Ditto for the unmodified arguments register, which aliases the arguments object.
            if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
                return true;

            return operand >= m_symbolTable->captureStart()
                && operand < m_symbolTable->captureEnd();
        }

        CodeType codeType() const { return m_codeType; }

        SourceProvider* source() const { return m_source.get(); }
        unsigned sourceOffset() const { return m_sourceOffset; }

        size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
        void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
        unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
        unsigned lastJumpTarget() const { return m_jumpTargets.last(); }

        void createActivation(CallFrame*);

        void clearEvalCache();

        String nameForRegister(int registerNumber);

        void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
        {
            m_propertyAccessInstructions.append(propertyAccessInstruction);
        }
#if ENABLE(LLINT)
        LLIntCallLinkInfo* addLLIntCallLinkInfo()
        {
            m_llintCallLinkInfos.append(LLIntCallLinkInfo());
            return &m_llintCallLinkInfos.last();
        }
#endif
#if ENABLE(JIT)
        void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
        size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
        StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

        void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
        size_t numberOfByValInfos() const { return m_byValInfos.size(); }
        ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

        void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
        size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
        CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif

#if ENABLE(VALUE_PROFILER)
        unsigned numberOfArgumentValueProfiles()
        {
            ASSERT(m_numParameters >= 0);
            ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
            return m_argumentValueProfiles.size();
        }
        ValueProfile* valueProfileForArgument(unsigned argumentIndex)
        {
            ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
            ASSERT(result->m_bytecodeOffset == -1);
            return result;
        }

        ValueProfile* addValueProfile(int bytecodeOffset)
        {
            ASSERT(bytecodeOffset != -1);
            ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
            m_valueProfiles.append(ValueProfile(bytecodeOffset));
            return &m_valueProfiles.last();
        }
        unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
        ValueProfile* valueProfile(int index)
        {
            ValueProfile* result = &m_valueProfiles[index];
            ASSERT(result->m_bytecodeOffset != -1);
            return result;
        }
        ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
        {
            ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
            ASSERT(result->m_bytecodeOffset != -1);
            ASSERT(instructions()[bytecodeOffset + opcodeLength(
                       m_globalData->interpreter->getOpcodeID(
                           instructions()[
                               bytecodeOffset].u.opcode)) - 1].u.profile == result);
            return result;
        }
        SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
        {
            return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
        }

        unsigned totalNumberOfValueProfiles()
        {
            return numberOfArgumentValueProfiles() + numberOfValueProfiles();
        }
        ValueProfile* getFromAllValueProfiles(unsigned index)
        {
            if (index < numberOfArgumentValueProfiles())
                return valueProfileForArgument(index);
            return valueProfile(index - numberOfArgumentValueProfiles());
        }

        RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
        {
            m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
            return &m_rareCaseProfiles.last();
        }
        unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
        RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
        RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
        {
            return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
        }

        bool likelyToTakeSlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
        }

        bool couldTakeSlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return value >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold();
        }

        RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
        {
            m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
            return &m_specialFastCaseProfiles.last();
        }
        unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
        RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
        RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
        {
            return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
        }

        bool likelyToTakeSpecialFastCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
        }

        bool couldTakeSpecialFastCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold();
        }

        bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned value = slowCaseCount - specialFastCaseCount;
            return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
        }

        bool likelyToTakeAnySlowCase(int bytecodeOffset)
        {
            if (!numberOfRareCaseProfiles())
                return false;
            unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
            unsigned value = slowCaseCount + specialFastCaseCount;
            return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold();
        }

        unsigned executionEntryCount() const { return m_executionEntryCount; }

        unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
        const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
        ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
        {
            m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
            return &m_arrayProfiles.last();
        }
        ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
        ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif

        // Exception handling support

        size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
        void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
        HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

        void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
        {
            createRareDataIfNecessary();
            m_rareData->m_expressionInfo.append(expressionInfo);
        }

        void addLineInfo(unsigned bytecodeOffset, int lineNo)
        {
            Vector<LineInfo>& lineInfo = m_lineInfo;
            if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
                LineInfo info = { bytecodeOffset, lineNo };
                lineInfo.append(info);
            }
        }

        bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }

#if ENABLE(JIT)
        Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
        {
            createRareDataIfNecessary();
            return m_rareData->m_callReturnIndexVector;
        }
#endif

#if ENABLE(DFG_JIT)
        SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
        {
            createRareDataIfNecessary();
            return m_rareData->m_inlineCallFrames;
        }

        Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
        {
            createRareDataIfNecessary();
            return m_rareData->m_codeOrigins;
        }

        // Having code origins implies that there has been some inlining.
        bool hasCodeOrigins()
        {
            return m_rareData && !!m_rareData->m_codeOrigins.size();
        }

        bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
        {
            if (!hasCodeOrigins())
                return false;
            unsigned offset = getJITCode().offsetOf(returnAddress.value());
            CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
            if (entry->callReturnOffset != offset)
                return false;
            codeOrigin = entry->codeOrigin;
            return true;
        }

        CodeOrigin codeOrigin(unsigned index)
        {
            ASSERT(m_rareData);
            return m_rareData->m_codeOrigins[index].codeOrigin;
        }

        bool addFrequentExitSite(const DFG::FrequentExitSite& site)
        {
            ASSERT(JITCode::isBaselineCode(getJITType()));
            return m_exitProfile.add(site);
        }

        DFG::ExitProfile& exitProfile() { return m_exitProfile; }

        CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
        {
            return m_lazyOperandValueProfiles;
        }
#endif

        // Constant Pool

        size_t numberOfIdentifiers() const { return m_identifiers.size(); }
        void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
        Identifier& identifier(int index) { return m_identifiers[index]; }

        size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
        unsigned addConstant(JSValue v)
        {
            unsigned result = m_constantRegisters.size();
            m_constantRegisters.append(WriteBarrier<Unknown>());
            m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
            return result;
        }
        unsigned addOrFindConstant(JSValue);
        WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
        ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
        ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

        unsigned addFunctionDecl(FunctionExecutable* n)
        {
            unsigned size = m_functionDecls.size();
            m_functionDecls.append(WriteBarrier<FunctionExecutable>());
            m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
            return size;
        }
        FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
        int numberOfFunctionDecls() { return m_functionDecls.size(); }
        unsigned addFunctionExpr(FunctionExecutable* n)
        {
            unsigned size = m_functionExprs.size();
            m_functionExprs.append(WriteBarrier<FunctionExecutable>());
            m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
            return size;
        }
        FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

        unsigned addRegExp(RegExp* r)
        {
            createRareDataIfNecessary();
            unsigned size = m_rareData->m_regexps.size();
            m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
            return size;
        }
        unsigned numberOfRegExps() const
        {
            if (!m_rareData)
                return 0;
            return m_rareData->m_regexps.size();
        }
        RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }

        unsigned numberOfConstantBuffers() const
        {
            if (!m_rareData)
                return 0;
            return m_rareData->m_constantBuffers.size();
        }
        unsigned addConstantBuffer(const Vector<JSValue>& buffer)
        {
            createRareDataIfNecessary();
            unsigned size = m_rareData->m_constantBuffers.size();
            m_rareData->m_constantBuffers.append(buffer);
            return size;
        }
        unsigned addConstantBuffer(unsigned length)
        {
            return addConstantBuffer(Vector<JSValue>(length));
        }

        Vector<JSValue>& constantBufferAsVector(unsigned index)
        {
            ASSERT(m_rareData);
            return m_rareData->m_constantBuffers[index];
        }
        JSValue* constantBuffer(unsigned index)
        {
            return constantBufferAsVector(index).data();
        }

        JSGlobalObject* globalObject() { return m_globalObject.get(); }

        JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
        {
            if (!codeOrigin.inlineCallFrame)
                return globalObject();
            // FIXME: if we ever inline based on the executable rather than the function, this code will need to change.
            return codeOrigin.inlineCallFrame->callee->scope()->globalObject();
        }

        // Jump Tables

        size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
        SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
        SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }

        size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
        SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
        SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }

        size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
        StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
        StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

        SharedSymbolTable* symbolTable() { return m_symbolTable.get(); }

        EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

        enum ShrinkMode {
            // Shrink prior to generating machine code that may point directly into vectors.
            EarlyShrink,

            // Shrink after generating machine code, and after possibly creating new vectors
            // and appending to others. At this time it is not safe to shrink certain vectors
            // because we would have generated machine code that references them directly.
            LateShrink
        };
        void shrinkToFit(ShrinkMode);
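
        // A rough sketch of how the two modes are meant to be used (the call sites
        // are paraphrased here, not quoted from the generator or the JIT):
        //
        //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink); // before any machine code is generated
        //     codeBlock->shrinkToFit(CodeBlock::LateShrink);  // after the JIT may hold pointers into these vectors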

        void copyPostParseDataFrom(CodeBlock* alternative);
        void copyPostParseDataFromAlternative();

        // Functions for controlling when JITting kicks in, in a mixed mode
        // execution world.

        bool checkIfJITThresholdReached()
        {
            return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
        }
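
        // Illustrative (hypothetical) caller: the LLInt's slow paths for function
        // entry and loop back edges would do roughly
        //
        //     if (codeBlock->checkIfJITThresholdReached())
        //         codeBlock->jitCompile(exec); // hand off to the baseline JIT
        //
        // while the helpers below adjust how soon that threshold is crossed.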

        void dontJITAnytimeSoon()
        {
            m_llintExecuteCounter.deferIndefinitely();
        }

        void jitAfterWarmUp()
        {
            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
        }

        void jitSoon()
        {
            m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
        }

        const ExecutionCounter& llintExecuteCounter() const
        {
            return m_llintExecuteCounter;
        }

        // Functions for controlling when tiered compilation kicks in. This
        // controls both when the optimizing compiler is invoked and when OSR
        // entry happens. Two triggers exist: the loop trigger and the return
        // trigger. In either case, when an addition to m_jitExecuteCounter
        // causes it to become non-negative, the optimizing compiler is
        // invoked. This includes a fast check to see if this CodeBlock has
        // already been optimized (i.e. replacement() returns a CodeBlock
        // that was optimized with a higher tier JIT than this one). In the
        // case of the loop trigger, if the optimized compilation succeeds
        // (or has already succeeded in the past) then OSR is attempted to
        // redirect program flow into the optimized code.

        // These functions are called from within the optimization triggers,
        // and are used as a single point at which we define the heuristics
        // for how much warm-up is mandated before the next optimization
        // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
        // as this is called from the CodeBlock constructor.

        // When we observe a lot of speculation failures, we trigger a
        // reoptimization. But each time, we increase the optimization trigger
        // to avoid thrashing.
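        //
        // As a worked example of that backoff (the numbers are hypothetical): with
        // a base threshold of 1000 and m_reoptimizationRetryCounter == 2,
        // counterValueForOptimizeAfterWarmUp() below yields 1000 << 2 == 4000, so
        // each failed optimization attempt demands roughly twice as much warm-up
        // before the next one is tried.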
        unsigned reoptimizationRetryCounter() const
        {
            ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
            return m_reoptimizationRetryCounter;
        }

        void countReoptimization()
        {
            m_reoptimizationRetryCounter++;
            if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
                m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
        }

        int32_t counterValueForOptimizeAfterWarmUp()
        {
            return Options::thresholdForOptimizeAfterWarmUp() << reoptimizationRetryCounter();
        }

        int32_t counterValueForOptimizeAfterLongWarmUp()
        {
            return Options::thresholdForOptimizeAfterLongWarmUp() << reoptimizationRetryCounter();
        }

        int32_t* addressOfJITExecuteCounter()
        {
            return &m_jitExecuteCounter.m_counter;
        }

        static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
        static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
        static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

        const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

        unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

        // Check if the optimization threshold has been reached, and if not,
        // adjust the heuristics accordingly. Returns true if the threshold has
        // been reached.
        bool checkIfOptimizationThresholdReached()
        {
            return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
        }

        // Call this to force the next optimization trigger to fire. This is
        // rarely wise, since optimization triggers are typically more
        // expensive than executing baseline code.
        void optimizeNextInvocation()
        {
            m_jitExecuteCounter.setNewThreshold(0, this);
        }

        // Call this to prevent optimization from happening again. Note that
        // optimization will still happen after roughly 2^29 invocations,
        // so this is really meant to delay that as much as possible. This
        // is called if optimization failed, and we expect it to fail in
        // the future as well.
        void dontOptimizeAnytimeSoon()
        {
            m_jitExecuteCounter.deferIndefinitely();
        }

        // Call this to reinitialize the counter to its starting state,
        // forcing a warm-up to happen before the next optimization trigger
        // fires. This is called in the CodeBlock constructor. It also
        // makes sense to call this if an OSR exit occurred. Note that
        // OSR exit code is code generated, so the value of the execute
        // counter that this corresponds to is also available directly.
        void optimizeAfterWarmUp()
        {
            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
        }

        // Call this to force an optimization trigger to fire only after
        // a lot of warm-up.
        void optimizeAfterLongWarmUp()
        {
            m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
        }

        // Call this to cause an optimization trigger to fire soon, but
        // not necessarily the next one. This makes sense if optimization
        // succeeds. Successful optimization means that all calls are
        // relinked to the optimized code, so this only affects call
        // frames that are still executing this CodeBlock. The value here
        // is tuned to strike a balance between the cost of OSR entry
        // (which is too high to warrant having every loop back edge trigger
        // OSR immediately) and the cost of executing baseline
        // code (which is high enough that we don't necessarily want to
        // have a full warm-up). The intuition for calling this instead of
        // optimizeNextInvocation() is for the case of recursive functions
        // with loops. Consider that there may be N call frames of some
        // recursive function, for a reasonably large value of N. The top
        // one triggers optimization, and then returns, and then all of
        // the others return. We don't want optimization to be triggered on
        // each return, as that would be superfluous. It only makes sense
        // to trigger optimization if one of those functions becomes hot
        // in the baseline code.
        void optimizeSoon()
        {
            m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon() << reoptimizationRetryCounter(), this);
        }

        uint32_t osrExitCounter() const { return m_osrExitCounter; }

        void countOSRExit() { m_osrExitCounter++; }

        uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

        static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

#if ENABLE(JIT)
        uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold)
        {
            ASSERT(getJITType() == JITCode::DFGJIT);
            // Compute this the lame way so we don't saturate. This is called infrequently
            // enough that this loop won't hurt us.
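            // For example (hypothetical numbers): desiredThreshold == 100 with a
            // baseline reoptimization retry counter of 3 doubles three times to
            // 800; if a doubling ever overflows, we give up and return UINT32_MAX.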
            unsigned result = desiredThreshold;
            for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
                unsigned newResult = result << 1;
                if (newResult < result)
                    return std::numeric_limits<uint32_t>::max();
                result = newResult;
            }
            return result;
        }

        uint32_t exitCountThresholdForReoptimization()
        {
            return adjustedExitCountThreshold(Options::osrExitCountForReoptimization());
        }

        uint32_t exitCountThresholdForReoptimizationFromLoop()
        {
            return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop());
        }

        bool shouldReoptimizeNow()
        {
            return osrExitCounter() >= exitCountThresholdForReoptimization();
        }

        bool shouldReoptimizeFromLoopNow()
        {
            return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
        }
#endif

#if ENABLE(VALUE_PROFILER)
        bool shouldOptimizeNow();
        void updateAllPredictions(OperationInProgress = NoOperation);
#else
        bool shouldOptimizeNow() { return false; }
        void updateAllPredictions(OperationInProgress = NoOperation) { }
#endif

#if ENABLE(JIT)
        void reoptimize();
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
        void dumpValueProfiles();
#endif

        // FIXME: Make these remaining members private.

        int m_numCalleeRegisters;
        int m_numVars;
        bool m_isConstructor;

    protected:
#if ENABLE(JIT)
        virtual bool jitCompileImpl(ExecState*) = 0;
#endif
        virtual void visitWeakReferences(SlotVisitor&);
        virtual void finalizeUnconditionally();

    private:
        friend class DFGCodeBlocks;

#if ENABLE(DFG_JIT)
        void tallyFrequentExitSites();
#else
        void tallyFrequentExitSites() { }
#endif
#if ENABLE(VALUE_PROFILER)
        void updateAllPredictionsAndCountLiveness(OperationInProgress, unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
#endif

        void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&);

        CString registerName(ExecState*, int r) const;
        void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op);
        void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op);
        void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op);
        void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&);
        void printGetByIdCacheStatus(ExecState*, int location);
        enum CacheDumpMode { DumpCaches, DontDumpCaches };
        void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op, CacheDumpMode);
        void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op);
        void visitStructures(SlotVisitor&, Instruction* vPC);

#if ENABLE(DFG_JIT)
        bool shouldImmediatelyAssumeLivenessDuringScan()
        {
            // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
            // CodeBlocks don't need to be jettisoned when their weak references go
            // stale. So if a baseline JIT CodeBlock gets scanned, we can assume
            // that it is live.
1243             if (!m_dfgData)
1244                 return true;
1245             
1246             // For simplicity, we don't attempt to jettison code blocks during GC if
1247             // they are executing. Instead we strongly mark their weak references to
1248             // allow them to continue to execute soundly.
1249             if (m_dfgData->mayBeExecuting)
1250                 return true;
1251
1252             return false;
1253         }
1254 #else
1255         bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
1256 #endif
1257         
1258         void performTracingFixpointIteration(SlotVisitor&);
1259         
1260         void stronglyVisitStrongReferences(SlotVisitor&);
1261         void stronglyVisitWeakReferences(SlotVisitor&);
1262
1263         void createRareDataIfNecessary()
1264         {
1265             if (!m_rareData)
1266                 m_rareData = adoptPtr(new RareData);
1267         }
1268
1269 #if ENABLE(JIT)
1270         void resetStubInternal(RepatchBuffer&, StructureStubInfo&);
1271 #endif
1272         
1273         int m_numParameters;
1274
1275         WriteBarrier<ScriptExecutable> m_ownerExecutable;
1276         JSGlobalData* m_globalData;
1277
1278         RefCountedArray<Instruction> m_instructions;
1279
1280         int m_thisRegister;
1281         int m_argumentsRegister;
1282         int m_activationRegister;
1283
1284         bool m_needsFullScopeChain;
1285         bool m_usesEval;
1286         bool m_isNumericCompareFunction;
1287         bool m_isStrictMode;
1288
1289         CodeType m_codeType;
1290
1291         RefPtr<SourceProvider> m_source;
1292         unsigned m_sourceOffset;
1293
1294         Vector<unsigned> m_propertyAccessInstructions;
1295 #if ENABLE(LLINT)
1296         SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
1297         SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
1298 #endif
1299 #if ENABLE(JIT)
1300         Vector<StructureStubInfo> m_structureStubInfos;
1301         Vector<ByValInfo> m_byValInfos;
1302         Vector<CallLinkInfo> m_callLinkInfos;
1303         JITCode m_jitCode;
1304         MacroAssemblerCodePtr m_jitCodeWithArityCheck;
1305         SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
1306 #endif
1307 #if ENABLE(DFG_JIT) || ENABLE(LLINT)
1308         OwnPtr<CompactJITCodeMap> m_jitCodeMap;
1309 #endif
1310 #if ENABLE(DFG_JIT)
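             // A structure transition (m_from -> m_to) that compiled DFG code depends on, optionally
             // keyed by a code-origin cell. These are collected in DFGData::transitions and revisited
             // on every GC (see DFGData::allTransitionsHaveBeenMarked).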
1311         struct WeakReferenceTransition {
1312             WeakReferenceTransition() { }
1313             
1314             WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
1315                 : m_from(globalData, owner, from)
1316                 , m_to(globalData, owner, to)
1317             {
1318                 if (!!codeOrigin)
1319                     m_codeOrigin.set(globalData, owner, codeOrigin);
1320             }
1321
1322             WriteBarrier<JSCell> m_codeOrigin;
1323             WriteBarrier<JSCell> m_from;
1324             WriteBarrier<JSCell> m_to;
1325         };
1326         
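             // State that exists only for DFG-compiled code blocks: OSR entry/exit records, speculation
             // recovery data, watchpoints, the transitions and weak references the code depends on, and
             // per-GC liveness bookkeeping.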
1327         struct DFGData {
1328             DFGData()
1329                 : mayBeExecuting(false)
1330                 , isJettisoned(false)
1331             {
1332             }
1333             
1334             Vector<DFG::OSREntryData> osrEntry;
1335             SegmentedVector<DFG::OSRExit, 8> osrExit;
1336             Vector<DFG::SpeculationRecovery> speculationRecovery;
1337             SegmentedVector<JumpReplacementWatchpoint, 1, 0> watchpoints;
1338             Vector<WeakReferenceTransition> transitions;
1339             Vector<WriteBarrier<JSCell> > weakReferences;
1340             DFG::VariableEventStream variableEventStream;
1341             DFG::MinifiedGraph minifiedDFG;
1342             bool mayBeExecuting;
1343             bool isJettisoned;
1344             bool livenessHasBeenProved; // Initialized and used on every GC.
1345             bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
1346             unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
1347         };
1348         
1349         OwnPtr<DFGData> m_dfgData;
1350         
1351         // This is relevant to non-DFG code blocks that serve as the profiled code block
1352         // for DFG code blocks.
1353         DFG::ExitProfile m_exitProfile;
1354         CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
1355 #endif
1356 #if ENABLE(VALUE_PROFILER)
1357         Vector<ValueProfile> m_argumentValueProfiles;
1358         SegmentedVector<ValueProfile, 8> m_valueProfiles;
1359         SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
1360         SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
1361         ArrayProfileVector m_arrayProfiles;
1362         unsigned m_executionEntryCount;
1363 #endif
1364
1365         Vector<unsigned> m_jumpTargets;
1366         Vector<unsigned> m_loopTargets;
1367
1368         // Constant Pool
1369         Vector<Identifier> m_identifiers;
1370         COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
1371         Vector<WriteBarrier<Unknown> > m_constantRegisters;
1372         Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
1373         Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
1374
1375         WriteBarrier<SharedSymbolTable> m_symbolTable;
1376
1377         OwnPtr<CodeBlock> m_alternative;
1378         
1379         ExecutionCounter m_llintExecuteCounter;
1380         
1381         ExecutionCounter m_jitExecuteCounter;
1382         int32_t m_totalJITExecutions;
1383         uint32_t m_osrExitCounter;
1384         uint16_t m_optimizationDelayCounter;
1385         uint16_t m_reoptimizationRetryCounter;
1386
1387         Vector<LineInfo> m_lineInfo;
1388 #if ENABLE(BYTECODE_COMMENTS)
1389         Vector<Comment> m_bytecodeComments;
1390         size_t m_bytecodeCommentIterator;
1391 #endif
1392         Vector<ResolveOperations> m_resolveOperations;
1393         Vector<PutToBaseOperation, 1> m_putToBaseOperations;
1394
1395         struct RareData {
1396             WTF_MAKE_FAST_ALLOCATED;
1397         public:
1398             Vector<HandlerInfo> m_exceptionHandlers;
1399
1400             // Rare Constants
1401             Vector<WriteBarrier<RegExp> > m_regexps;
1402
1403             // Buffers used for large array literals
1404             Vector<Vector<JSValue> > m_constantBuffers;
1405             
1406             // Jump Tables
1407             Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
1408             Vector<SimpleJumpTable> m_characterSwitchJumpTables;
1409             Vector<StringJumpTable> m_stringSwitchJumpTables;
1410
1411             EvalCodeCache m_evalCodeCache;
1412
1413             // Expression info - present if debugging.
1414             Vector<ExpressionRangeInfo> m_expressionInfo;
1415             // Line info - present if profiling or debugging.
1416 #if ENABLE(JIT)
1417             Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
1418 #endif
1419 #if ENABLE(DFG_JIT)
1420             SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
1421             Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
1422 #endif
1423         };
1424 #if COMPILER(MSVC)
1425         friend void WTF::deleteOwnedPtr<RareData>(RareData*);
1426 #endif
1427         OwnPtr<RareData> m_rareData;
1428 #if ENABLE(JIT)
1429         DFG::CapabilityLevel m_canCompileWithDFGState;
1430 #endif
1431     };
1432
1433     // Program code is not marked by any function, so we make the global object
1434     // responsible for marking it.
1435
1436     class GlobalCodeBlock : public CodeBlock {
1437     protected:
1438         GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
1439             : CodeBlock(CopyParsedBlock, other)
1440         {
1441         }
1442         
1443         GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
1444             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, false, alternative)
1445         {
1446         }
1447     };
1448
1449     class ProgramCodeBlock : public GlobalCodeBlock {
1450     public:
1451         ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
1452             : GlobalCodeBlock(CopyParsedBlock, other)
1453         {
1454         }
1455
1456         ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
1457             : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
1458         {
1459         }
1460         
1461 #if ENABLE(JIT)
1462     protected:
1463         virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
1464         virtual void jettison();
1465         virtual bool jitCompileImpl(ExecState*);
1466         virtual CodeBlock* replacement();
1467         virtual DFG::CapabilityLevel canCompileWithDFGInternal();
1468 #endif
1469     };
1470
1471     class EvalCodeBlock : public GlobalCodeBlock {
1472     public:
1473         EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
1474             : GlobalCodeBlock(CopyParsedBlock, other)
1475             , m_baseScopeDepth(other.m_baseScopeDepth)
1476             , m_variables(other.m_variables)
1477         {
1478         }
1479         
1480         EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
1481             : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
1482             , m_baseScopeDepth(baseScopeDepth)
1483         {
1484         }
1485
1486         int baseScopeDepth() const { return m_baseScopeDepth; }
1487
1488         const Identifier& variable(unsigned index) { return m_variables[index]; }
1489         unsigned numVariables() { return m_variables.size(); }
1490         void adoptVariables(Vector<Identifier>& variables)
1491         {
1492             ASSERT(m_variables.isEmpty());
1493             m_variables.swap(variables);
1494         }
1495         
1496 #if ENABLE(JIT)
1497     protected:
1498         virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
1499         virtual void jettison();
1500         virtual bool jitCompileImpl(ExecState*);
1501         virtual CodeBlock* replacement();
1502         virtual DFG::CapabilityLevel canCompileWithDFGInternal();
1503 #endif
1504
1505     private:
1506         int m_baseScopeDepth;
1507         Vector<Identifier> m_variables;
1508     };
1509
1510     class FunctionCodeBlock : public CodeBlock {
1511     public:
1512         FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
1513             : CodeBlock(CopyParsedBlock, other)
1514         {
1515         }
1516
1517         FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
1518             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, isConstructor, alternative)
1519         {
1520         }
1521         
1522 #if ENABLE(JIT)
1523     protected:
1524         virtual JSObject* compileOptimized(ExecState*, JSScope*, unsigned bytecodeIndex);
1525         virtual void jettison();
1526         virtual bool jitCompileImpl(ExecState*);
1527         virtual CodeBlock* replacement();
1528         virtual DFG::CapabilityLevel canCompileWithDFGInternal();
1529 #endif
1530     };
1531
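         // Inline call frames always come from FunctionExecutables (see the ASSERT below), so the
         // baseline CodeBlock can be fetched directly from the executable for the call or construct
         // kind in use.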
1532     inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
1533     {
1534         ASSERT(inlineCallFrame);
1535         ExecutableBase* executable = inlineCallFrame->executable.get();
1536         ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
1537         return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
1538     }
1539     
1540     inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
1541     {
1542         if (codeOrigin.inlineCallFrame)
1543             return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
1544         return baselineCodeBlock;
1545     }
1546     
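         // Map an argument index to the register offset where its value actually lives. If the
         // argument has been captured, the symbol table's slow-argument data redirects it to the
         // captured slot; otherwise the ordinary argument offset is returned.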
1547     inline int CodeBlock::argumentIndexAfterCapture(size_t argument)
1548     {
1549         if (argument >= static_cast<size_t>(symbolTable()->parameterCount()))
1550             return CallFrame::argumentOffset(argument);
1551
1552         const SlowArgument* slowArguments = symbolTable()->slowArguments();
1553         if (!slowArguments || slowArguments[argument].status == SlowArgument::Normal)
1554             return CallFrame::argumentOffset(argument);
1555
1556         ASSERT(slowArguments[argument].status == SlowArgument::Captured);
1557         return slowArguments[argument].index;
1558     }
1559
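         // Constant register indices do not name call frame slots; they alias entries in the
         // CodeBlock's constant pool, so reads of those indices are redirected there.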
1560     inline Register& ExecState::r(int index)
1561     {
1562         CodeBlock* codeBlock = this->codeBlock();
1563         if (codeBlock->isConstantRegisterIndex(index))
1564             return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
1565         return this[index];
1566     }
1567
1568     inline Register& ExecState::uncheckedR(int index)
1569     {
1570         ASSERT(index < FirstConstantRegisterIndex);
1571         return this[index];
1572     }
1573
1574 #if ENABLE(DFG_JIT)
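         // Only DFG-compiled code blocks can contain inlined call frames, so any other frame can
         // answer false without taking the slow path.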
1575     inline bool ExecState::isInlineCallFrame()
1576     {
1577         if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
1578             return false;
1579         return isInlineCallFrameSlow();
1580     }
1581 #endif
1582
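         // Read an argument's current value, honoring capture: out-of-range arguments yield
         // undefined, frames without a CodeBlock use the plain argument offset, and everything
         // else goes through CodeBlock::argumentIndexAfterCapture().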
1583     inline JSValue ExecState::argumentAfterCapture(size_t argument)
1584     {
1585         if (argument >= argumentCount())
1586             return jsUndefined();
1587
1588         if (!codeBlock())
1589             return this[argumentOffset(argument)].jsValue();
1590
1591         return this[codeBlock()->argumentIndexAfterCapture(argument)].jsValue();
1592     }
1593
1594 #if ENABLE(DFG_JIT)
1595     inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
1596     {
1597         // We have to check for 0 and -1 because those are used by the HashMap as markers.
1598         uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
1599         
1600         // This checks for both of those nasty cases in one go.
1601         // 0 + 1 = 1
1602         // -1 + 1 = 0
1603         if (value + 1 <= 1)
1604             return;
1605         
1606         HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
1607         if (iter == m_set.end())
1608             return;
1609         
1610         (*iter)->m_dfgData->mayBeExecuting = true;
1611     }
1612 #endif
1613     
1614     inline JSValue Structure::prototypeForLookup(CodeBlock* codeBlock) const
1615     {
1616         return prototypeForLookup(codeBlock->globalObject());
1617     }
1618
1619 } // namespace JSC
1620
1621 #endif // CodeBlock_h