DFG should be able to set watchpoints on global variables
[WebKit-https.git] / Source / JavaScriptCore / bytecode / CodeBlock.h
1 /*
2  * Copyright (C) 2008, 2009, 2010, 2011, 2012 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #ifndef CodeBlock_h
31 #define CodeBlock_h
32
33 #include "BytecodeConventions.h"
34 #include "CallLinkInfo.h"
35 #include "CallReturnOffsetToBytecodeOffset.h"
36 #include "CodeOrigin.h"
37 #include "CodeType.h"
38 #include "CompactJITCodeMap.h"
39 #include "DFGCodeBlocks.h"
40 #include "DFGCommon.h"
41 #include "DFGExitProfile.h"
42 #include "DFGOSREntry.h"
43 #include "DFGOSRExit.h"
44 #include "EvalCodeCache.h"
45 #include "ExecutionCounter.h"
46 #include "ExpressionRangeInfo.h"
47 #include "GlobalResolveInfo.h"
48 #include "HandlerInfo.h"
49 #include "MethodCallLinkInfo.h"
50 #include "Options.h"
51 #include "Instruction.h"
52 #include "JITCode.h"
53 #include "JITWriteBarrier.h"
54 #include "JSGlobalObject.h"
55 #include "JumpTable.h"
56 #include "LLIntCallLinkInfo.h"
57 #include "LazyOperandValueProfile.h"
58 #include "LineInfo.h"
59 #include "Nodes.h"
60 #include "RegExpObject.h"
61 #include "StructureStubInfo.h"
62 #include "UString.h"
63 #include "UnconditionalFinalizer.h"
64 #include "ValueProfile.h"
65 #include "Watchpoint.h"
66 #include <wtf/RefCountedArray.h>
67 #include <wtf/FastAllocBase.h>
68 #include <wtf/PassOwnPtr.h>
69 #include <wtf/RefPtr.h>
70 #include <wtf/SegmentedVector.h>
71 #include <wtf/Vector.h>
72 #include "StructureStubInfo.h"
73
74 namespace JSC {
75
76     class DFGCodeBlocks;
77     class ExecState;
78     class LLIntOffsetsExtractor;
79
80     inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
81
82     static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
83
84     class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
85         WTF_MAKE_FAST_ALLOCATED;
86         friend class JIT;
87         friend class LLIntOffsetsExtractor;
88     public:
89         enum CopyParsedBlockTag { CopyParsedBlock };
90     protected:
91         CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);
92         
93         CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);
94
95         WriteBarrier<JSGlobalObject> m_globalObject;
96         Heap* m_heap;
97
98     public:
99         JS_EXPORT_PRIVATE virtual ~CodeBlock();
100         
101         int numParameters() const { return m_numParameters; }
102         void setNumParameters(int newValue);
103         void addParameter();
104         
105         int* addressOfNumParameters() { return &m_numParameters; }
106         static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
107
108         CodeBlock* alternative() { return m_alternative.get(); }
109         PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
110         void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
111         
112         CodeSpecializationKind specializationKind()
113         {
114             if (m_isConstructor)
115                 return CodeForConstruct;
116             return CodeForCall;
117         }
118         
119 #if ENABLE(JIT)
120         CodeBlock* baselineVersion()
121         {
122             CodeBlock* result = replacement();
123             if (!result)
124                 return 0; // This can happen if we're in the process of creating the baseline version.
125             while (result->alternative())
126                 result = result->alternative();
127             ASSERT(result);
128             ASSERT(JITCode::isBaselineCode(result->getJITType()));
129             return result;
130         }
131 #endif
132         
133         void visitAggregate(SlotVisitor&);
134
135         static void dumpStatistics();
136
137         void dump(ExecState*);
138         void printStructures(const Instruction*);
139         void printStructure(const char* name, const Instruction*, int operand);
140
141         bool isStrictMode() const { return m_isStrictMode; }
142
143         inline bool isKnownNotImmediate(int index)
144         {
145             if (index == m_thisRegister && !m_isStrictMode)
146                 return true;
147
148             if (isConstantRegisterIndex(index))
149                 return getConstant(index).isCell();
150
151             return false;
152         }
153
154         ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
155         {
156             return index >= m_numVars;
157         }
158
159         HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
160         int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
161         void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
162
163 #if ENABLE(JIT)
164
165         StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
166         {
167             return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
168         }
169
170         StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
171         {
172             return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
173         }
174
175         CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
176         {
177             return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
178         }
179         
180         CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
181         {
182             return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
183         }
184
185         MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
186         {
187             return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
188         }
189
190         MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
191         {
192             return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
193         }
194
195         unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
196
197         unsigned bytecodeOffsetForCallAtIndex(unsigned index)
198         {
199             if (!m_rareData)
200                 return 1;
201             Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
202             if (!callIndices.size())
203                 return 1;
204             ASSERT(index < m_rareData->m_callReturnIndexVector.size());
205             return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
206         }
207
208         void unlinkCalls();
209         
210         bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
211         
212         void linkIncomingCall(CallLinkInfo* incoming)
213         {
214             m_incomingCalls.push(incoming);
215         }
216 #if ENABLE(LLINT)
217         void linkIncomingCall(LLIntCallLinkInfo* incoming)
218         {
219             m_incomingLLIntCalls.push(incoming);
220         }
221 #endif // ENABLE(LLINT)
222         
223         void unlinkIncomingCalls();
224 #endif // ENABLE(JIT)
225
226 #if ENABLE(DFG_JIT) || ENABLE(LLINT)
227         void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
228         {
229             m_jitCodeMap = jitCodeMap;
230         }
231         CompactJITCodeMap* jitCodeMap()
232         {
233             return m_jitCodeMap.get();
234         }
235 #endif
236         
237 #if ENABLE(DFG_JIT)
238         void createDFGDataIfNecessary()
239         {
240             if (!!m_dfgData)
241                 return;
242             
243             m_dfgData = adoptPtr(new DFGData);
244         }
245         
246         DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
247         {
248             createDFGDataIfNecessary();
249             DFG::OSREntryData entry;
250             entry.m_bytecodeIndex = bytecodeIndex;
251             entry.m_machineCodeOffset = machineCodeOffset;
252             m_dfgData->osrEntry.append(entry);
253             return &m_dfgData->osrEntry.last();
254         }
255         unsigned numberOfDFGOSREntries() const
256         {
257             if (!m_dfgData)
258                 return 0;
259             return m_dfgData->osrEntry.size();
260         }
261         DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
262         DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
263         {
264             if (!m_dfgData)
265                 return 0;
266             if (m_dfgData->osrEntry.isEmpty())
267                 return 0;
268             DFG::OSREntryData* result = binarySearch<
269                 DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(
270                     m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(),
271                     bytecodeIndex, WTF::KeyMustNotBePresentInArray);
272             if (result->m_bytecodeIndex != bytecodeIndex)
273                 return 0;
274             return result;
275         }
276         
277         unsigned appendOSRExit(const DFG::OSRExit& osrExit)
278         {
279             createDFGDataIfNecessary();
280             unsigned result = m_dfgData->osrExit.size();
281             m_dfgData->osrExit.append(osrExit);
282             return result;
283         }
284         
285         DFG::OSRExit& lastOSRExit()
286         {
287             return m_dfgData->osrExit.last();
288         }
289         
290         unsigned appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
291         {
292             createDFGDataIfNecessary();
293             unsigned result = m_dfgData->speculationRecovery.size();
294             m_dfgData->speculationRecovery.append(recovery);
295             return result;
296         }
297         
298         unsigned appendWatchpoint(const Watchpoint& watchpoint)
299         {
300             createDFGDataIfNecessary();
301             unsigned result = m_dfgData->watchpoints.size();
302             m_dfgData->watchpoints.append(watchpoint);
303             return result;
304         }
305         
306         unsigned numberOfOSRExits()
307         {
308             if (!m_dfgData)
309                 return 0;
310             return m_dfgData->osrExit.size();
311         }
312         
313         unsigned numberOfSpeculationRecoveries()
314         {
315             if (!m_dfgData)
316                 return 0;
317             return m_dfgData->speculationRecovery.size();
318         }
319         
320         unsigned numberOfWatchpoints()
321         {
322             if (!m_dfgData)
323                 return 0;
324             return m_dfgData->watchpoints.size();
325         }
326         
327         DFG::OSRExit& osrExit(unsigned index)
328         {
329             return m_dfgData->osrExit[index];
330         }
331         
332         DFG::SpeculationRecovery& speculationRecovery(unsigned index)
333         {
334             return m_dfgData->speculationRecovery[index];
335         }
336         
337         Watchpoint& watchpoint(unsigned index)
338         {
339             return m_dfgData->watchpoints[index];
340         }
341         
342         void appendWeakReference(JSCell* target)
343         {
344             createDFGDataIfNecessary();
345             m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
346         }
347         
348         void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
349         {
350             createDFGDataIfNecessary();
351             m_dfgData->transitions.append(
352                 WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
353         }
354 #endif
355
356         unsigned bytecodeOffset(Instruction* returnAddress)
357         {
358             ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
359             return static_cast<Instruction*>(returnAddress) - instructions().begin();
360         }
361
362         void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
363         bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
364
365         unsigned numberOfInstructions() const { return m_instructions.size(); }
366         RefCountedArray<Instruction>& instructions() { return m_instructions; }
367         const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
368         
369         size_t predictedMachineCodeSize();
370         
371         bool usesOpcode(OpcodeID);
372
373         unsigned instructionCount() { return m_instructions.size(); }
374
375 #if ENABLE(JIT)
376         void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
377         {
378             m_jitCode = code;
379             m_jitCodeWithArityCheck = codeWithArityCheck;
380 #if ENABLE(DFG_JIT)
381             if (m_jitCode.jitType() == JITCode::DFGJIT) {
382                 createDFGDataIfNecessary();
383                 m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
384             }
385 #endif
386         }
387         JITCode& getJITCode() { return m_jitCode; }
388         MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
389         JITCode::JITType getJITType() { return m_jitCode.jitType(); }
390         ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
391         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
392         virtual void jettison() = 0;
393         enum JITCompilationResult { AlreadyCompiled, CouldNotCompile, CompiledSuccessfully };
394         JITCompilationResult jitCompile(ExecState* exec)
395         {
396             if (getJITType() != JITCode::InterpreterThunk) {
397                 ASSERT(getJITType() == JITCode::BaselineJIT);
398                 return AlreadyCompiled;
399             }
400 #if ENABLE(JIT)
401             if (jitCompileImpl(exec))
402                 return CompiledSuccessfully;
403             return CouldNotCompile;
404 #else
405             UNUSED_PARAM(exec);
406             return CouldNotCompile;
407 #endif
408         }
409         virtual CodeBlock* replacement() = 0;
410
411         virtual DFG::CapabilityLevel canCompileWithDFGInternal() = 0;
412         DFG::CapabilityLevel canCompileWithDFG()
413         {
414             DFG::CapabilityLevel result = canCompileWithDFGInternal();
415             m_canCompileWithDFGState = result;
416             return result;
417         }
418         DFG::CapabilityLevel canCompileWithDFGState() { return m_canCompileWithDFGState; }
419
420         bool hasOptimizedReplacement()
421         {
422             ASSERT(JITCode::isBaselineCode(getJITType()));
423             bool result = replacement()->getJITType() > getJITType();
424 #if !ASSERT_DISABLED
425             if (result)
426                 ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
427             else {
428                 ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
429                 ASSERT(replacement() == this);
430             }
431 #endif
432             return result;
433         }
434 #else
435         JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
436 #endif
437
438         ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
439
440         void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
441         JSGlobalData* globalData() { return m_globalData; }
442
443         void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
444         int thisRegister() const { return m_thisRegister; }
445
446         void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
447         bool needsFullScopeChain() const { return m_needsFullScopeChain; }
448         void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
449         bool usesEval() const { return m_usesEval; }
450         
451         void setArgumentsRegister(int argumentsRegister)
452         {
453             ASSERT(argumentsRegister != -1);
454             m_argumentsRegister = argumentsRegister;
455             ASSERT(usesArguments());
456         }
457         int argumentsRegister()
458         {
459             ASSERT(usesArguments());
460             return m_argumentsRegister;
461         }
462         int uncheckedArgumentsRegister()
463         {
464             if (!usesArguments())
465                 return InvalidVirtualRegister;
466             return argumentsRegister();
467         }
468         void setActivationRegister(int activationRegister)
469         {
470             m_activationRegister = activationRegister;
471         }
472         int activationRegister()
473         {
474             ASSERT(needsFullScopeChain());
475             return m_activationRegister;
476         }
477         int uncheckedActivationRegister()
478         {
479             if (!needsFullScopeChain())
480                 return InvalidVirtualRegister;
481             return activationRegister();
482         }
483         bool usesArguments() const { return m_argumentsRegister != -1; }
484         
485         bool needsActivation() const
486         {
487             return needsFullScopeChain() && codeType() != GlobalCode;
488         }
489         
490         bool argumentsAreCaptured() const
491         {
492             return needsActivation() || usesArguments();
493         }
494         
495         bool argumentIsCaptured(int) const
496         {
497             return argumentsAreCaptured();
498         }
499         
500         bool localIsCaptured(InlineCallFrame* inlineCallFrame, int operand) const
501         {
502             if (!inlineCallFrame)
503                 return operand < m_numCapturedVars;
504             
505             return inlineCallFrame->capturedVars.get(operand);
506         }
507         
508         bool isCaptured(InlineCallFrame* inlineCallFrame, int operand) const
509         {
510             if (operandIsArgument(operand))
511                 return argumentIsCaptured(operandToArgument(operand));
512             return localIsCaptured(inlineCallFrame, operand);
513         }
514
515         CodeType codeType() const { return m_codeType; }
516
517         SourceProvider* source() const { return m_source.get(); }
518         unsigned sourceOffset() const { return m_sourceOffset; }
519
520         size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
521         void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
522         unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
523         unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
524
525         void createActivation(CallFrame*);
526
527         void clearEvalCache();
528
529         void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
530         {
531             m_propertyAccessInstructions.append(propertyAccessInstruction);
532         }
533         void addGlobalResolveInstruction(unsigned globalResolveInstruction)
534         {
535             m_globalResolveInstructions.append(globalResolveInstruction);
536         }
537         bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
538 #if ENABLE(LLINT)
539         LLIntCallLinkInfo* addLLIntCallLinkInfo()
540         {
541             m_llintCallLinkInfos.append(LLIntCallLinkInfo());
542             return &m_llintCallLinkInfos.last();
543         }
544 #endif
545 #if ENABLE(JIT)
546         void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
547         size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
548         StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
549
550         void addGlobalResolveInfo(unsigned globalResolveInstruction)
551         {
552             m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
553         }
554         GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
555         bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
556
557         void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
558         size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
559         CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
560
561         void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
562         MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
563         size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
564 #endif
565         
566 #if ENABLE(VALUE_PROFILER)
567         unsigned numberOfArgumentValueProfiles()
568         {
569             ASSERT(m_numParameters >= 0);
570             ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
571             return m_argumentValueProfiles.size();
572         }
573         ValueProfile* valueProfileForArgument(unsigned argumentIndex)
574         {
575             ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
576             ASSERT(result->m_bytecodeOffset == -1);
577             return result;
578         }
579         
580         ValueProfile* addValueProfile(int bytecodeOffset)
581         {
582             ASSERT(bytecodeOffset != -1);
583             ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
584             m_valueProfiles.append(ValueProfile(bytecodeOffset));
585             return &m_valueProfiles.last();
586         }
587         unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
588         ValueProfile* valueProfile(int index)
589         {
590             ValueProfile* result = &m_valueProfiles[index];
591             ASSERT(result->m_bytecodeOffset != -1);
592             return result;
593         }
594         ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
595         {
596             ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
597             ASSERT(result->m_bytecodeOffset != -1);
598             ASSERT(instructions()[bytecodeOffset + opcodeLength(
599                        m_globalData->interpreter->getOpcodeID(
600                            instructions()[
601                                bytecodeOffset].u.opcode)) - 1].u.profile == result);
602             return result;
603         }
604         SpeculatedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
605         {
606             return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
607         }
608         
609         unsigned totalNumberOfValueProfiles()
610         {
611             return numberOfArgumentValueProfiles() + numberOfValueProfiles();
612         }
613         ValueProfile* getFromAllValueProfiles(unsigned index)
614         {
615             if (index < numberOfArgumentValueProfiles())
616                 return valueProfileForArgument(index);
617             return valueProfile(index - numberOfArgumentValueProfiles());
618         }
619         
620         RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
621         {
622             m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
623             return &m_rareCaseProfiles.last();
624         }
625         unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
626         RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
627         RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
628         {
629             return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
630         }
631         
632         bool likelyToTakeSlowCase(int bytecodeOffset)
633         {
634             if (!numberOfRareCaseProfiles())
635                 return false;
636             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
637             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
638         }
639         
640         bool couldTakeSlowCase(int bytecodeOffset)
641         {
642             if (!numberOfRareCaseProfiles())
643                 return false;
644             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
645             return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
646         }
647         
648         RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
649         {
650             m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
651             return &m_specialFastCaseProfiles.last();
652         }
653         unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
654         RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
655         RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
656         {
657             return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
658         }
659         
660         bool likelyToTakeSpecialFastCase(int bytecodeOffset)
661         {
662             if (!numberOfRareCaseProfiles())
663                 return false;
664             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
665             return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
666         }
667         
668         bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
669         {
670             if (!numberOfRareCaseProfiles())
671                 return false;
672             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
673             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
674             unsigned value = slowCaseCount - specialFastCaseCount;
675             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
676         }
677         
678         bool likelyToTakeAnySlowCase(int bytecodeOffset)
679         {
680             if (!numberOfRareCaseProfiles())
681                 return false;
682             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
683             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
684             unsigned value = slowCaseCount + specialFastCaseCount;
685             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
686         }
687         
688         unsigned executionEntryCount() const { return m_executionEntryCount; }
689 #endif
690
691         unsigned globalResolveInfoCount() const
692         {
693 #if ENABLE(JIT)    
694             if (m_globalData->canUseJIT())
695                 return m_globalResolveInfos.size();
696 #endif
697             return 0;
698         }
699
700         // Exception handling support
701
702         size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
703         void addExceptionHandler(const HandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); }
704         HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
705
706         void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
707         {
708             createRareDataIfNecessary();
709             m_rareData->m_expressionInfo.append(expressionInfo);
710         }
711
712         void addLineInfo(unsigned bytecodeOffset, int lineNo)
713         {
714             createRareDataIfNecessary();
715             Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
716             if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
717                 LineInfo info = { bytecodeOffset, lineNo };
718                 lineInfo.append(info);
719             }
720         }
721
722         bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
723         bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
724         //  We only generate exception handling info if the user is debugging
725         // (and may want line number info), or if the function contains exception handler.
726         bool needsCallReturnIndices()
727         {
728             return m_rareData &&
729                 (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
730         }
731
732 #if ENABLE(JIT)
733         Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
734         {
735             createRareDataIfNecessary();
736             return m_rareData->m_callReturnIndexVector;
737         }
738 #endif
739
740 #if ENABLE(DFG_JIT)
741         SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
742         {
743             createRareDataIfNecessary();
744             return m_rareData->m_inlineCallFrames;
745         }
746         
747         Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
748         {
749             createRareDataIfNecessary();
750             return m_rareData->m_codeOrigins;
751         }
752         
753         // Having code origins implies that there has been some inlining.
754         bool hasCodeOrigins()
755         {
756             return m_rareData && !!m_rareData->m_codeOrigins.size();
757         }
758         
759         bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
760         {
761             if (!hasCodeOrigins())
762                 return false;
763             unsigned offset = getJITCode().offsetOf(returnAddress.value());
764             CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
765             if (entry->callReturnOffset != offset)
766                 return false;
767             codeOrigin = entry->codeOrigin;
768             return true;
769         }
770         
771         CodeOrigin codeOrigin(unsigned index)
772         {
773             ASSERT(m_rareData);
774             return m_rareData->m_codeOrigins[index].codeOrigin;
775         }
776         
777         bool addFrequentExitSite(const DFG::FrequentExitSite& site)
778         {
779             ASSERT(JITCode::isBaselineCode(getJITType()));
780             return m_exitProfile.add(site);
781         }
782
783         DFG::ExitProfile& exitProfile() { return m_exitProfile; }
784         
785         CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
786         {
787             return m_lazyOperandValueProfiles;
788         }
789 #endif
790
791         // Constant Pool
792
793         size_t numberOfIdentifiers() const { return m_identifiers.size(); }
794         void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
795         Identifier& identifier(int index) { return m_identifiers[index]; }
796
797         size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
798         unsigned addConstant(JSValue v)
799         {
800             unsigned result = m_constantRegisters.size();
801             m_constantRegisters.append(WriteBarrier<Unknown>());
802             m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
803             return result;
804         }
805         unsigned addOrFindConstant(JSValue);
806         WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
807         ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
808         ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
809
810         unsigned addFunctionDecl(FunctionExecutable* n)
811         {
812             unsigned size = m_functionDecls.size();
813             m_functionDecls.append(WriteBarrier<FunctionExecutable>());
814             m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
815             return size;
816         }
817         FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
818         int numberOfFunctionDecls() { return m_functionDecls.size(); }
819         unsigned addFunctionExpr(FunctionExecutable* n)
820         {
821             unsigned size = m_functionExprs.size();
822             m_functionExprs.append(WriteBarrier<FunctionExecutable>());
823             m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
824             return size;
825         }
826         FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
827
828         unsigned addRegExp(RegExp* r)
829         {
830             createRareDataIfNecessary();
831             unsigned size = m_rareData->m_regexps.size();
832             m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
833             return size;
834         }
835         unsigned numberOfRegExps() const
836         {
837             if (!m_rareData)
838                 return 0;
839             return m_rareData->m_regexps.size();
840         }
841         RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
842
843         unsigned addConstantBuffer(unsigned length)
844         {
845             createRareDataIfNecessary();
846             unsigned size = m_rareData->m_constantBuffers.size();
847             m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
848             return size;
849         }
850
851         JSValue* constantBuffer(unsigned index)
852         {
853             ASSERT(m_rareData);
854             return m_rareData->m_constantBuffers[index].data();
855         }
856
857         JSGlobalObject* globalObject() { return m_globalObject.get(); }
858         
859         JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
860         {
861             if (!codeOrigin.inlineCallFrame)
862                 return globalObject();
863             // FIXME: if we ever inline based on executable not function, this code will need to change.
864             return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
865         }
866
867         // Jump Tables
868
869         size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
870         SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
871         SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
872
873         size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
874         SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
875         SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
876
877         size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
878         StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
879         StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
880
881
882         SymbolTable* symbolTable() { return m_symbolTable; }
883         SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
884
885         EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
886
887         enum ShrinkMode {
888             // Shrink prior to generating machine code that may point directly into vectors.
889             EarlyShrink,
890             
891             // Shrink after generating machine code, and after possibly creating new vectors
892             // and appending to others. At this time it is not safe to shrink certain vectors
893             // because we would have generated machine code that references them directly.
894             LateShrink
895         };
896         void shrinkToFit(ShrinkMode);
897         
898         void copyPostParseDataFrom(CodeBlock* alternative);
899         void copyPostParseDataFromAlternative();
900         
901         // Functions for controlling when JITting kicks in, in a mixed mode
902         // execution world.
903         
904         bool checkIfJITThresholdReached()
905         {
906             return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
907         }
908         
909         void dontJITAnytimeSoon()
910         {
911             m_llintExecuteCounter.deferIndefinitely();
912         }
913         
914         void jitAfterWarmUp()
915         {
916             m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this);
917         }
918         
919         void jitSoon()
920         {
921             m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this);
922         }
923         
924         int32_t llintExecuteCounter() const
925         {
926             return m_llintExecuteCounter.m_counter;
927         }
928         
929         // Functions for controlling when tiered compilation kicks in. This
930         // controls both when the optimizing compiler is invoked and when OSR
931         // entry happens. Two triggers exist: the loop trigger and the return
932         // trigger. In either case, when an addition to m_jitExecuteCounter
933         // causes it to become non-negative, the optimizing compiler is
934         // invoked. This includes a fast check to see if this CodeBlock has
935         // already been optimized (i.e. replacement() returns a CodeBlock
936         // that was optimized with a higher tier JIT than this one). In the
937         // case of the loop trigger, if the optimized compilation succeeds
938         // (or has already succeeded in the past) then OSR is attempted to
939         // redirect program flow into the optimized code.
940         
941         // These functions are called from within the optimization triggers,
942         // and are used as a single point at which we define the heuristics
943         // for how much warm-up is mandated before the next optimization
944         // trigger files. All CodeBlocks start out with optimizeAfterWarmUp(),
945         // as this is called from the CodeBlock constructor.
946         
947         // When we observe a lot of speculation failures, we trigger a
948         // reoptimization. But each time, we increase the optimization trigger
949         // to avoid thrashing.
950         unsigned reoptimizationRetryCounter() const
951         {
952             ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
953             return m_reoptimizationRetryCounter;
954         }
955         
956         void countReoptimization()
957         {
958             m_reoptimizationRetryCounter++;
959             if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
960                 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
961         }
962         
963         int32_t counterValueForOptimizeAfterWarmUp()
964         {
965             return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter();
966         }
967         
968         int32_t counterValueForOptimizeAfterLongWarmUp()
969         {
970             return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
971         }
972         
973         int32_t* addressOfJITExecuteCounter()
974         {
975             return &m_jitExecuteCounter.m_counter;
976         }
977         
978         static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
979         static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
980         static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }
981
982         int32_t jitExecuteCounter() const { return m_jitExecuteCounter.m_counter; }
983         
984         unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
985         
986         // Check if the optimization threshold has been reached, and if not,
987         // adjust the heuristics accordingly. Returns true if the threshold has
988         // been reached.
989         bool checkIfOptimizationThresholdReached()
990         {
991             return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
992         }
993         
994         // Call this to force the next optimization trigger to fire. This is
995         // rarely wise, since optimization triggers are typically more
996         // expensive than executing baseline code.
997         void optimizeNextInvocation()
998         {
999             m_jitExecuteCounter.setNewThreshold(0, this);
1000         }
1001         
1002         // Call this to prevent optimization from happening again. Note that
1003         // optimization will still happen after roughly 2^29 invocations,
1004         // so this is really meant to delay that as much as possible. This
1005         // is called if optimization failed, and we expect it to fail in
1006         // the future as well.
1007         void dontOptimizeAnytimeSoon()
1008         {
1009             m_jitExecuteCounter.deferIndefinitely();
1010         }
1011         
1012         // Call this to reinitialize the counter to its starting state,
1013         // forcing a warm-up to happen before the next optimization trigger
1014         // fires. This is called in the CodeBlock constructor. It also
1015         // makes sense to call this if an OSR exit occurred. Note that
1016         // OSR exit code is code generated, so the value of the execute
1017         // counter that this corresponds to is also available directly.
1018         void optimizeAfterWarmUp()
1019         {
1020             m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterWarmUp(), this);
1021         }
1022         
1023         // Call this to force an optimization trigger to fire only after
1024         // a lot of warm-up.
1025         void optimizeAfterLongWarmUp()
1026         {
1027             m_jitExecuteCounter.setNewThreshold(counterValueForOptimizeAfterLongWarmUp(), this);
1028         }
1029         
1030         // Call this to cause an optimization trigger to fire soon, but
1031         // not necessarily the next one. This makes sense if optimization
1032         // succeeds. Successfuly optimization means that all calls are
1033         // relinked to the optimized code, so this only affects call
1034         // frames that are still executing this CodeBlock. The value here
1035         // is tuned to strike a balance between the cost of OSR entry
1036         // (which is too high to warrant making every loop back edge to
1037         // trigger OSR immediately) and the cost of executing baseline
1038         // code (which is high enough that we don't necessarily want to
1039         // have a full warm-up). The intuition for calling this instead of
1040         // optimizeNextInvocation() is for the case of recursive functions
1041         // with loops. Consider that there may be N call frames of some
1042         // recursive function, for a reasonably large value of N. The top
1043         // one triggers optimization, and then returns, and then all of
1044         // the others return. We don't want optimization to be triggered on
1045         // each return, as that would be superfluous. It only makes sense
1046         // to trigger optimization if one of those functions becomes hot
1047         // in the baseline code.
1048         void optimizeSoon()
1049         {
1050             m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this);
1051         }
1052         
1053         // The speculative JIT tracks its success rate, so that we can
1054         // decide when to reoptimize. It's interesting to note that these
1055         // counters may overflow without any protection. The success
1056         // counter will overflow before the fail one does, becuase the
1057         // fail one is used as a trigger to reoptimize. So the worst case
1058         // is that the success counter overflows and we reoptimize without
1059         // needing to. But this is harmless. If a method really did
1060         // execute 2^32 times then compiling it again probably won't hurt
1061         // anyone.
1062         
1063         void countSpeculationSuccess()
1064         {
1065             m_speculativeSuccessCounter++;
1066         }
1067         
1068         void countSpeculationFailure()
1069         {
1070             m_speculativeFailCounter++;
1071         }
1072         
1073         uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
1074         uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
1075         uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }
1076         
1077         uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
1078         uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
1079         uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }
1080         
1081         static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
1082         static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
1083         static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
1084
1085 #if ENABLE(JIT)
1086         // The number of failures that triggers the use of the ratio.
1087         unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
1088         unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
1089
1090         bool shouldReoptimizeNow()
1091         {
1092             return (Options::desiredSpeculativeSuccessFailRatio *
1093                         speculativeFailCounter() >= speculativeSuccessCounter()
1094                     && speculativeFailCounter() >= largeFailCountThreshold())
1095                 || forcedOSRExitCounter() >=
1096                        Options::forcedOSRExitCountForReoptimization;
1097         }
1098
1099         bool shouldReoptimizeFromLoopNow()
1100         {
1101             return (Options::desiredSpeculativeSuccessFailRatio *
1102                         speculativeFailCounter() >= speculativeSuccessCounter()
1103                     && speculativeFailCounter() >= largeFailCountThresholdForLoop())
1104                 || forcedOSRExitCounter() >=
1105                        Options::forcedOSRExitCountForReoptimization;
1106         }
1107 #endif
1108
1109 #if ENABLE(VALUE_PROFILER)
1110         bool shouldOptimizeNow();
1111 #else
1112         bool shouldOptimizeNow() { return false; }
1113 #endif
1114         
1115 #if ENABLE(JIT)
1116         void reoptimize()
1117         {
1118             ASSERT(replacement() != this);
1119             ASSERT(replacement()->alternative() == this);
1120             replacement()->tallyFrequentExitSites();
1121             replacement()->jettison();
1122             countReoptimization();
1123             optimizeAfterWarmUp();
1124         }
1125 #endif
1126
1127 #if ENABLE(VERBOSE_VALUE_PROFILE)
1128         void dumpValueProfiles();
1129 #endif
1130         
1131         // FIXME: Make these remaining members private.
1132
1133         int m_numCalleeRegisters;
1134         int m_numVars;
1135         int m_numCapturedVars;
1136         bool m_isConstructor;
1137
1138     protected:
1139 #if ENABLE(JIT)
1140         virtual bool jitCompileImpl(ExecState*) = 0;
1141 #endif
1142         virtual void visitWeakReferences(SlotVisitor&);
1143         virtual void finalizeUnconditionally();
1144         
1145     private:
1146         friend class DFGCodeBlocks;
1147         
1148 #if ENABLE(DFG_JIT)
1149         void tallyFrequentExitSites();
1150 #else
1151         void tallyFrequentExitSites() { }
1152 #endif
1153         
1154         void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&);
1155
1156         CString registerName(ExecState*, int r) const;
1157         void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op);
1158         void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op);
1159         void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op);
1160         void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&);
1161         void printGetByIdCacheStatus(ExecState*, int location);
1162         enum CacheDumpMode { DumpCaches, DontDumpCaches };
1163         void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op, CacheDumpMode);
1164         void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op);
1165         void visitStructures(SlotVisitor&, Instruction* vPC);
1166         
1167 #if ENABLE(DFG_JIT)
1168         bool shouldImmediatelyAssumeLivenessDuringScan()
1169         {
1170             // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
1171             // CodeBlocks don't need to be jettisoned when their weak references go
1172             // stale. So if a basline JIT CodeBlock gets scanned, we can assume that
1173             // this means that it's live.
1174             if (!m_dfgData)
1175                 return true;
1176             
1177             // For simplicity, we don't attempt to jettison code blocks during GC if
1178             // they are executing. Instead we strongly mark their weak references to
1179             // allow them to continue to execute soundly.
1180             if (m_dfgData->mayBeExecuting)
1181                 return true;
1182
1183             return false;
1184         }
1185 #else
1186         bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
1187 #endif
1188         
1189         void performTracingFixpointIteration(SlotVisitor&);
1190         
1191         void stronglyVisitStrongReferences(SlotVisitor&);
1192         void stronglyVisitWeakReferences(SlotVisitor&);
1193
1194         void createRareDataIfNecessary()
1195         {
1196             if (!m_rareData)
1197                 m_rareData = adoptPtr(new RareData);
1198         }
1199         
1200         int m_numParameters;
1201
1202         WriteBarrier<ScriptExecutable> m_ownerExecutable;
1203         JSGlobalData* m_globalData;
1204
1205         RefCountedArray<Instruction> m_instructions;
1206
1207         int m_thisRegister;
1208         int m_argumentsRegister;
1209         int m_activationRegister;
1210
1211         bool m_needsFullScopeChain;
1212         bool m_usesEval;
1213         bool m_isNumericCompareFunction;
1214         bool m_isStrictMode;
1215
1216         CodeType m_codeType;
1217
1218         RefPtr<SourceProvider> m_source;
1219         unsigned m_sourceOffset;
1220
1221         Vector<unsigned> m_propertyAccessInstructions;
1222         Vector<unsigned> m_globalResolveInstructions;
1223 #if ENABLE(LLINT)
1224         SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
1225         SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
1226 #endif
1227 #if ENABLE(JIT)
1228         Vector<StructureStubInfo> m_structureStubInfos;
1229         Vector<GlobalResolveInfo> m_globalResolveInfos;
1230         Vector<CallLinkInfo> m_callLinkInfos;
1231         Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
1232         JITCode m_jitCode;
1233         MacroAssemblerCodePtr m_jitCodeWithArityCheck;
1234         SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
1235 #endif
1236 #if ENABLE(DFG_JIT) || ENABLE(LLINT)
1237         OwnPtr<CompactJITCodeMap> m_jitCodeMap;
1238 #endif
1239 #if ENABLE(DFG_JIT)
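             // A WeakReferenceTransition records a Structure transition (m_from -> m_to,
             // optionally scoped to a code-origin cell) that the DFG compiled against.
             // These are visited weakly during GC (see performTracingFixpointIteration()
             // above): broadly, the 'to' structure only stays alive while the 'from'
             // structure and the code origin are themselves still reachable.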
1240         struct WeakReferenceTransition {
1241             WeakReferenceTransition() { }
1242             
1243             WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
1244                 : m_from(globalData, owner, from)
1245                 , m_to(globalData, owner, to)
1246             {
1247                 if (!!codeOrigin)
1248                     m_codeOrigin.set(globalData, owner, codeOrigin);
1249             }
1250
1251             WriteBarrier<JSCell> m_codeOrigin;
1252             WriteBarrier<JSCell> m_from;
1253             WriteBarrier<JSCell> m_to;
1254         };
1255         
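             // DFGData is the extra payload that exists only for DFG-compiled CodeBlocks:
             // OSR entry/exit metadata, speculation recoveries, watchpoints, and the weak
             // reference / transition bookkeeping that the GC consults (via
             // shouldImmediatelyAssumeLivenessDuringScan() and the tracing fixpoint) when
             // deciding whether this block can be jettisoned.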
1256         struct DFGData {
1257             DFGData()
1258                 : mayBeExecuting(false)
1259                 , isJettisoned(false)
1260             {
1261             }
1262             
1263             Vector<DFG::OSREntryData> osrEntry;
1264             SegmentedVector<DFG::OSRExit, 8> osrExit;
1265             Vector<DFG::SpeculationRecovery> speculationRecovery;
1266             SegmentedVector<Watchpoint, 1, 0> watchpoints;
1267             Vector<WeakReferenceTransition> transitions;
1268             Vector<WriteBarrier<JSCell> > weakReferences;
1269             bool mayBeExecuting;
1270             bool isJettisoned;
1271             bool livenessHasBeenProved; // Initialized and used on every GC.
1272             bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
1273             unsigned visitAggregateHasBeenCalled; // Unsigned to make it work seamlessly with the broadest set of CAS implementations.
1274         };
1275         
1276         OwnPtr<DFGData> m_dfgData;
1277         
1278         // This is relevant to non-DFG code blocks that serve as the profiled code block
1279         // for DFG code blocks.
1280         DFG::ExitProfile m_exitProfile;
1281         CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
1282 #endif
1283 #if ENABLE(VALUE_PROFILER)
1284         Vector<ValueProfile> m_argumentValueProfiles;
1285         SegmentedVector<ValueProfile, 8> m_valueProfiles;
1286         SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
1287         SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
1288         unsigned m_executionEntryCount;
1289 #endif
1290
1291         Vector<unsigned> m_jumpTargets;
1292         Vector<unsigned> m_loopTargets;
1293
1294         // Constant Pool
1295         Vector<Identifier> m_identifiers;
1296         COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
1297         Vector<WriteBarrier<Unknown> > m_constantRegisters;
1298         Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
1299         Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
1300
1301         SymbolTable* m_symbolTable;
1302
1303         OwnPtr<CodeBlock> m_alternative;
1304         
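             // Tier-up bookkeeping. Roughly: m_llintExecuteCounter decides when LLInt code
             // is hot enough to compile with the baseline JIT, while m_jitExecuteCounter
             // and the speculation/OSR-exit counters below feed the decision to optimize
             // with the DFG, or to back off and retry after too many failed speculations.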
1305         ExecutionCounter m_llintExecuteCounter;
1306         
1307         ExecutionCounter m_jitExecuteCounter;
1308         int32_t m_totalJITExecutions;
1309         uint32_t m_speculativeSuccessCounter;
1310         uint32_t m_speculativeFailCounter;
1311         uint32_t m_forcedOSRExitCounter;
1312         uint16_t m_optimizationDelayCounter;
1313         uint16_t m_reoptimizationRetryCounter;
1314         
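             // RareData holds state that most CodeBlocks never need (exception handlers,
             // switch jump tables, the eval code cache, debugging/profiling info); it is
             // allocated lazily via createRareDataIfNecessary() above.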
1315         struct RareData {
1316             WTF_MAKE_FAST_ALLOCATED;
1317         public:
1318             Vector<HandlerInfo> m_exceptionHandlers;
1319
1320             // Rare Constants
1321             Vector<WriteBarrier<RegExp> > m_regexps;
1322
1323             // Buffers used for large array literals
1324             Vector<Vector<JSValue> > m_constantBuffers;
1325             
1326             // Jump Tables
1327             Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
1328             Vector<SimpleJumpTable> m_characterSwitchJumpTables;
1329             Vector<StringJumpTable> m_stringSwitchJumpTables;
1330
1331             EvalCodeCache m_evalCodeCache;
1332
1333             // Expression info - present if debugging.
1334             Vector<ExpressionRangeInfo> m_expressionInfo;
1335             // Line info - present if profiling or debugging.
1336             Vector<LineInfo> m_lineInfo;
1337 #if ENABLE(JIT)
1338             Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
1339 #endif
1340 #if ENABLE(DFG_JIT)
1341             SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
1342             Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
1343 #endif
1344         };
1345 #if COMPILER(MSVC)
1346         friend void WTF::deleteOwnedPtr<RareData>(RareData*);
1347 #endif
1348         OwnPtr<RareData> m_rareData;
1349 #if ENABLE(JIT)
1350         DFG::CapabilityLevel m_canCompileWithDFGState;
1351 #endif
1352     };
1353
1354     // Program code is not marked by any function, so we make the global object
1355     // responsible for marking it.
1356
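         // GlobalCodeBlock (the base of program and eval code blocks) embeds its own
         // unshared SymbolTable, whereas FunctionCodeBlock below hands the base class a
         // ref-counted SharedSymbolTable so that copies made via CopyParsedBlock can
         // share it.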
1357     class GlobalCodeBlock : public CodeBlock {
1358     protected:
1359         GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
1360             : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
1361             , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
1362         {
1363         }
1364         
1365         GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
1366             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
1367         {
1368         }
1369
1370     private:
1371         SymbolTable m_unsharedSymbolTable;
1372     };
1373
1374     class ProgramCodeBlock : public GlobalCodeBlock {
1375     public:
1376         ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
1377             : GlobalCodeBlock(CopyParsedBlock, other)
1378         {
1379         }
1380
1381         ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
1382             : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
1383         {
1384         }
1385         
1386 #if ENABLE(JIT)
1387     protected:
1388         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1389         virtual void jettison();
1390         virtual bool jitCompileImpl(ExecState*);
1391         virtual CodeBlock* replacement();
1392         virtual DFG::CapabilityLevel canCompileWithDFGInternal();
1393 #endif
1394     };
1395
1396     class EvalCodeBlock : public GlobalCodeBlock {
1397     public:
1398         EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
1399             : GlobalCodeBlock(CopyParsedBlock, other)
1400             , m_baseScopeDepth(other.m_baseScopeDepth)
1401             , m_variables(other.m_variables)
1402         {
1403         }
1404         
1405         EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
1406             : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
1407             , m_baseScopeDepth(baseScopeDepth)
1408         {
1409         }
1410
1411         int baseScopeDepth() const { return m_baseScopeDepth; }
1412
1413         const Identifier& variable(unsigned index) { return m_variables[index]; }
1414         unsigned numVariables() { return m_variables.size(); }
1415         void adoptVariables(Vector<Identifier>& variables)
1416         {
1417             ASSERT(m_variables.isEmpty());
1418             m_variables.swap(variables);
1419         }
1420         
1421 #if ENABLE(JIT)
1422     protected:
1423         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1424         virtual void jettison();
1425         virtual bool jitCompileImpl(ExecState*);
1426         virtual CodeBlock* replacement();
1427         virtual DFG::CapabilityLevel canCompileWithDFGInternal();
1428 #endif
1429
1430     private:
1431         int m_baseScopeDepth;
1432         Vector<Identifier> m_variables;
1433     };
1434
1435     class FunctionCodeBlock : public CodeBlock {
1436     public:
1437         FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
1438             : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
1439         {
1440             // The fact that we have to do this is yucky, but is necessary because of the
1441             // class hierarchy issues described in the comment block for the main
1442             // constructor, below.
1443             sharedSymbolTable()->ref();
1444         }
1445
1446         // Rather than using the usual RefCounted::create idiom for SharedSymbolTable, we create the
1447         // table and leak a single ref to it: the CodeBlock must be initialized before any RefPtr member
1448         // could hold the shared symbol table, so we pass it as a raw pointer with a ref count of 1 and
1449         // manually deref it in the destructor.
1450         FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
1451             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
1452         {
1453         }
1454         ~FunctionCodeBlock()
1455         {
1456             sharedSymbolTable()->deref();
1457         }
1458         
1459 #if ENABLE(JIT)
1460     protected:
1461         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1462         virtual void jettison();
1463         virtual bool jitCompileImpl(ExecState*);
1464         virtual CodeBlock* replacement();
1465         virtual DFG::CapabilityLevel canCompileWithDFGInternal();
1466 #endif
1467     };
1468
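         // Maps an inlined call frame back to the baseline CodeBlock of the function that
         // was inlined, so DFG code can reason about the code it inlined (for example when
         // reconstructing state for an OSR exit at an inlined code origin).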
1469     inline CodeBlock* baselineCodeBlockForInlineCallFrame(InlineCallFrame* inlineCallFrame)
1470     {
1471         ASSERT(inlineCallFrame);
1472         ExecutableBase* executable = inlineCallFrame->executable.get();
1473         ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
1474         return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
1475     }
1476     
1477     inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
1478     {
1479         if (codeOrigin.inlineCallFrame)
1480             return baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
1481         return baselineCodeBlock;
1482     }
1483     
1484
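         // Register indices at or above FirstConstantRegisterIndex name entries in the
         // CodeBlock's constant pool rather than stack slots, so r() redirects them there;
         // uncheckedR() skips that check and must only be given genuine stack registers.
         // Illustrative use from an interpreter-style caller (the operand name here is
         // hypothetical):
         //     JSValue src = exec->r(srcOperand).jsValue(); // works for locals and constants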
1485     inline Register& ExecState::r(int index)
1486     {
1487         CodeBlock* codeBlock = this->codeBlock();
1488         if (codeBlock->isConstantRegisterIndex(index))
1489             return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
1490         return this[index];
1491     }
1492
1493     inline Register& ExecState::uncheckedR(int index)
1494     {
1495         ASSERT(index < FirstConstantRegisterIndex);
1496         return this[index];
1497     }
1498
1499 #if ENABLE(DFG_JIT)
1500     inline bool ExecState::isInlineCallFrame()
1501     {
1502         if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
1503             return false;
1504         return isInlineCallFrameSlow();
1505     }
1506 #endif
1507
1508 #if ENABLE(DFG_JIT)
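     // Intended for candidate pointers found during conservative scanning: if the value
     // happens to be a DFG CodeBlock tracked in m_set, flag it as possibly executing so
     // that shouldImmediatelyAssumeLivenessDuringScan() (above) keeps it alive instead of
     // letting it be jettisoned out from under running code.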
1509     inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
1510     {
1511         // We have to check for 0 and -1 because those are used by the HashMap as markers.
1512         uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
1513         
1514         // This checks for both of those nasty cases in one go.
1515         // 0 + 1 = 1
1516         // -1 + 1 = 0
1517         if (value + 1 <= 1)
1518             return;
1519         
1520         HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
1521         if (iter == m_set.end())
1522             return;
1523         
1524         (*iter)->m_dfgData->mayBeExecuting = true;
1525     }
1526 #endif
1527     
1528     inline JSValue Structure::prototypeForLookup(CodeBlock* codeBlock) const
1529     {
1530         return prototypeForLookup(codeBlock->globalObject());
1531     }
1532
1533 } // namespace JSC
1534
1535 #endif // CodeBlock_h