1 /*
2  * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #ifndef CodeBlock_h
31 #define CodeBlock_h
32
33 #include "BytecodeConventions.h"
34 #include "CallLinkInfo.h"
35 #include "CallReturnOffsetToBytecodeOffset.h"
36 #include "CodeOrigin.h"
37 #include "CodeType.h"
38 #include "CompactJITCodeMap.h"
39 #include "DFGCodeBlocks.h"
40 #include "DFGExitProfile.h"
41 #include "DFGOSREntry.h"
42 #include "DFGOSRExit.h"
43 #include "EvalCodeCache.h"
44 #include "ExpressionRangeInfo.h"
45 #include "GlobalResolveInfo.h"
46 #include "HandlerInfo.h"
47 #include "MethodCallLinkInfo.h"
48 #include "Options.h"
49 #include "Instruction.h"
50 #include "JITCode.h"
51 #include "JITWriteBarrier.h"
52 #include "JSGlobalObject.h"
53 #include "JumpTable.h"
54 #include "LLIntCallLinkInfo.h"
55 #include "LineInfo.h"
56 #include "Nodes.h"
57 #include "PredictionTracker.h"
58 #include "RegExpObject.h"
59 #include "StructureStubInfo.h"
60 #include "UString.h"
61 #include "UnconditionalFinalizer.h"
62 #include "ValueProfile.h"
63 #include <wtf/FastAllocBase.h>
64 #include <wtf/PassOwnPtr.h>
65 #include <wtf/RefPtr.h>
66 #include <wtf/SegmentedVector.h>
67 #include <wtf/Vector.h>
69
70 namespace JSC {
71
72     class DFGCodeBlocks;
73     class ExecState;
74     class LLIntOffsetsExtractor;
75
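    // The unmodified arguments value is conventionally kept in the register immediately
    // below the arguments register, hence the "- 1" below. Illustrative example: if the
    // arguments object lives in register 10, the untouched copy is expected in register 9.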
76     inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
77
78     static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
79
80     class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
81         WTF_MAKE_FAST_ALLOCATED;
82         friend class JIT;
83         friend class LLIntOffsetsExtractor;
84     public:
85         enum CopyParsedBlockTag { CopyParsedBlock };
86     protected:
87         CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);
88         
89         CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);
90
91         WriteBarrier<JSGlobalObject> m_globalObject;
92         Heap* m_heap;
93
94     public:
95         JS_EXPORT_PRIVATE virtual ~CodeBlock();
96         
97         int numParameters() const { return m_numParameters; }
98         void setNumParameters(int newValue);
99         void addParameter();
100         
101         int* addressOfNumParameters() { return &m_numParameters; }
102         static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
103
104         CodeBlock* alternative() { return m_alternative.get(); }
105         PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
106         void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
107         
108         CodeSpecializationKind specializationKind()
109         {
110             if (m_isConstructor)
111                 return CodeForConstruct;
112             return CodeForCall;
113         }
114         
115 #if ENABLE(JIT)
116         CodeBlock* baselineVersion()
117         {
118             CodeBlock* result = replacement();
119             if (!result)
120                 return 0; // This can happen if we're in the process of creating the baseline version.
121             while (result->alternative())
122                 result = result->alternative();
123             ASSERT(result);
124             ASSERT(JITCode::isBaselineCode(result->getJITType()));
125             return result;
126         }
127 #endif
128         
129         bool canProduceCopyWithBytecode() { return hasInstructions(); }
130
131         void visitAggregate(SlotVisitor&);
132
133         static void dumpStatistics();
134
135         void dump(ExecState*) const;
136         void printStructures(const Instruction*) const;
137         void printStructure(const char* name, const Instruction*, int operand) const;
138
139         bool isStrictMode() const { return m_isStrictMode; }
140
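        // A register is known not to hold an immediate if it is the 'this' register in
        // non-strict code (non-strict code boxes primitive |this| values to objects), or
        // if it is a constant register whose constant is a cell. Illustrative example: a
        // constant register holding the string "foo" qualifies, one holding 42 does not.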
141         inline bool isKnownNotImmediate(int index)
142         {
143             if (index == m_thisRegister && !m_isStrictMode)
144                 return true;
145
146             if (isConstantRegisterIndex(index))
147                 return getConstant(index).isCell();
148
149             return false;
150         }
151
152         ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
153         {
154             return index >= m_numVars;
155         }
156
157         HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
158         int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
159         void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
160
161 #if ENABLE(JIT)
162
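        // The lookups below binary-search m_structureStubInfos, m_callLinkInfos and
        // m_methodCallLinkInfos, so they rely on those vectors being sorted by return
        // address (respectively by bytecode index). Illustrative use from a slow-path
        // routine (names here are for illustration only):
        //
        //     StructureStubInfo& stubInfo = codeBlock->getStubInfo(returnAddress);
        //
        // where returnAddress is the ReturnAddressPtr of the call site being patched.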
163         StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
164         {
165             return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
166         }
167
168         StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
169         {
170             return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
171         }
172
173         CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
174         {
175             return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
176         }
177         
178         CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
179         {
180             return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
181         }
182
183         MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
184         {
185             return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
186         }
187
188         MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
189         {
190             return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
191         }
192
193         unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
194
195         unsigned bytecodeOffsetForCallAtIndex(unsigned index)
196         {
197             if (!m_rareData)
198                 return 1;
199             Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
200             if (!callIndices.size())
201                 return 1;
202             ASSERT(index < m_rareData->m_callReturnIndexVector.size());
203             return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
204         }
205
206         void unlinkCalls();
207         
208         bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
209         
210         void linkIncomingCall(CallLinkInfo* incoming)
211         {
212             m_incomingCalls.push(incoming);
213         }
214 #if ENABLE(LLINT)
215         void linkIncomingCall(LLIntCallLinkInfo* incoming)
216         {
217             m_incomingLLIntCalls.push(incoming);
218         }
219 #endif // ENABLE(LLINT)
220         
221         void unlinkIncomingCalls();
222 #endif // ENABLE(JIT)
223
224 #if ENABLE(DFG_JIT) || ENABLE(LLINT)
225         void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
226         {
227             m_jitCodeMap = jitCodeMap;
228         }
229         CompactJITCodeMap* jitCodeMap()
230         {
231             return m_jitCodeMap.get();
232         }
233 #endif
234         
235 #if ENABLE(DFG_JIT)
236         void createDFGDataIfNecessary()
237         {
238             if (!!m_dfgData)
239                 return;
240             
241             m_dfgData = adoptPtr(new DFGData);
242         }
243         
244         DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
245         {
246             createDFGDataIfNecessary();
247             DFG::OSREntryData entry;
248             entry.m_bytecodeIndex = bytecodeIndex;
249             entry.m_machineCodeOffset = machineCodeOffset;
250             m_dfgData->osrEntry.append(entry);
251             return &m_dfgData->osrEntry.last();
252         }
253         unsigned numberOfDFGOSREntries() const
254         {
255             if (!m_dfgData)
256                 return 0;
257             return m_dfgData->osrEntry.size();
258         }
259         DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
260         DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
261         {
262             return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
263         }
264         
265         void appendOSRExit(const DFG::OSRExit& osrExit)
266         {
267             createDFGDataIfNecessary();
268             m_dfgData->osrExit.append(osrExit);
269         }
270         
271         DFG::OSRExit& lastOSRExit()
272         {
273             return m_dfgData->osrExit.last();
274         }
275         
276         void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
277         {
278             createDFGDataIfNecessary();
279             m_dfgData->speculationRecovery.append(recovery);
280         }
281         
282         unsigned numberOfOSRExits()
283         {
284             if (!m_dfgData)
285                 return 0;
286             return m_dfgData->osrExit.size();
287         }
288         
289         unsigned numberOfSpeculationRecoveries()
290         {
291             if (!m_dfgData)
292                 return 0;
293             return m_dfgData->speculationRecovery.size();
294         }
295         
296         DFG::OSRExit& osrExit(unsigned index)
297         {
298             return m_dfgData->osrExit[index];
299         }
300         
301         DFG::SpeculationRecovery& speculationRecovery(unsigned index)
302         {
303             return m_dfgData->speculationRecovery[index];
304         }
305         
306         void appendWeakReference(JSCell* target)
307         {
308             createDFGDataIfNecessary();
309             m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
310         }
311         
312         void shrinkWeakReferencesToFit()
313         {
314             if (!m_dfgData)
315                 return;
316             m_dfgData->weakReferences.shrinkToFit();
317         }
318         
319         void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
320         {
321             createDFGDataIfNecessary();
322             m_dfgData->transitions.append(
323                 WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
324         }
325         
326         void shrinkWeakReferenceTransitionsToFit()
327         {
328             if (!m_dfgData)
329                 return;
330             m_dfgData->transitions.shrinkToFit();
331         }
332 #endif
333
334         unsigned bytecodeOffset(Instruction* returnAddress)
335         {
336             ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
337             return returnAddress - instructions().begin();
338         }
339
340         void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
341         bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
342
343         bool hasInstructions() const { return !!m_instructions; }
344         unsigned numberOfInstructions() const { return !m_instructions ? 0 : m_instructions->m_instructions.size(); }
345         Vector<Instruction>& instructions() { return m_instructions->m_instructions; }
346         const Vector<Instruction>& instructions() const { return m_instructions->m_instructions; }
347         void discardBytecode() { m_instructions.clear(); }
348         void discardBytecodeLater()
349         {
350             m_shouldDiscardBytecode = true;
351         }
352         
353         bool usesOpcode(OpcodeID);
354
355         unsigned instructionCount() { return m_instructionCount; }
356         void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; }
357
358 #if ENABLE(JIT)
359         void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
360         {
361             m_jitCode = code;
362             m_jitCodeWithArityCheck = codeWithArityCheck;
363 #if ENABLE(DFG_JIT)
364             if (m_jitCode.jitType() == JITCode::DFGJIT) {
365                 createDFGDataIfNecessary();
366                 m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
367             }
368 #endif
369         }
370         JITCode& getJITCode() { return m_jitCode; }
371         MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
372         JITCode::JITType getJITType() { return m_jitCode.jitType(); }
373         ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
374         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
375         virtual void jettison() = 0;
376         bool jitCompile(JSGlobalData& globalData)
377         {
378             if (getJITType() != JITCode::InterpreterThunk) {
379                 ASSERT(getJITType() == JITCode::BaselineJIT);
380                 return false;
381             }
382 #if ENABLE(JIT)
383             jitCompileImpl(globalData);
384             return true;
385 #else
386             UNUSED_PARAM(globalData);
387             return false;
388 #endif
389         }
390         virtual CodeBlock* replacement() = 0;
391
392         enum CompileWithDFGState {
393             CompileWithDFGFalse,
394             CompileWithDFGTrue,
395             CompileWithDFGUnset
396         };
397
398         virtual bool canCompileWithDFGInternal() = 0;
399         bool canCompileWithDFG()
400         {
401             bool result = canCompileWithDFGInternal();
402             m_canCompileWithDFGState = result ? CompileWithDFGTrue : CompileWithDFGFalse;
403             return result;
404         }
405         CompileWithDFGState canCompileWithDFGState() { return m_canCompileWithDFGState; }
406
407         bool hasOptimizedReplacement()
408         {
409             ASSERT(JITCode::isBaselineCode(getJITType()));
410             bool result = replacement()->getJITType() > getJITType();
411 #if !ASSERT_DISABLED
412             if (result)
413                 ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
414             else {
415                 ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
416                 ASSERT(replacement() == this);
417             }
418 #endif
419             return result;
420         }
421 #else
422         JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
423 #endif
424
425         ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
426
427         void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
428         JSGlobalData* globalData() { return m_globalData; }
429
430         void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
431         int thisRegister() const { return m_thisRegister; }
432
433         void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
434         bool needsFullScopeChain() const { return m_needsFullScopeChain; }
435         void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
436         bool usesEval() const { return m_usesEval; }
437         
438         void setArgumentsRegister(int argumentsRegister)
439         {
440             ASSERT(argumentsRegister != -1);
441             m_argumentsRegister = argumentsRegister;
442             ASSERT(usesArguments());
443         }
444         int argumentsRegister()
445         {
446             ASSERT(usesArguments());
447             return m_argumentsRegister;
448         }
449         void setActivationRegister(int activationRegister)
450         {
451             m_activationRegister = activationRegister;
452         }
453         int activationRegister()
454         {
455             ASSERT(needsFullScopeChain());
456             return m_activationRegister;
457         }
458         bool usesArguments() const { return m_argumentsRegister != -1; }
459
460         CodeType codeType() const { return m_codeType; }
461
462         SourceProvider* source() const { return m_source.get(); }
463         unsigned sourceOffset() const { return m_sourceOffset; }
464
465         size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
466         void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
467         unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
468         unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
469
470         void createActivation(CallFrame*);
471
472         void clearEvalCache();
473
474         void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
475         {
476             m_propertyAccessInstructions.append(propertyAccessInstruction);
477         }
478         void addGlobalResolveInstruction(unsigned globalResolveInstruction)
479         {
480             m_globalResolveInstructions.append(globalResolveInstruction);
481         }
482         bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
483 #if ENABLE(LLINT)
484         LLIntCallLinkInfo* addLLIntCallLinkInfo()
485         {
486             m_llintCallLinkInfos.append(LLIntCallLinkInfo());
487             return &m_llintCallLinkInfos.last();
488         }
489 #endif
490 #if ENABLE(JIT)
491         void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
492         size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
493         StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
494
495         void addGlobalResolveInfo(unsigned globalResolveInstruction)
496         {
497             m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
498         }
499         GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
500         bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
501
502         void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
503         size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
504         CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
505
506         void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
507         MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
508         size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
509 #endif
510         
511 #if ENABLE(VALUE_PROFILER)
512         unsigned numberOfArgumentValueProfiles()
513         {
514             ASSERT(m_numParameters >= 0);
515             ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
516             return m_argumentValueProfiles.size();
517         }
518         ValueProfile* valueProfileForArgument(unsigned argumentIndex)
519         {
520             ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
521             ASSERT(result->m_bytecodeOffset == -1);
522             return result;
523         }
524         
525         ValueProfile* addValueProfile(int bytecodeOffset)
526         {
527             ASSERT(bytecodeOffset != -1);
528             ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
529             m_valueProfiles.append(ValueProfile(bytecodeOffset));
530             return &m_valueProfiles.last();
531         }
532         unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
533         ValueProfile* valueProfile(int index)
534         {
535             ValueProfile* result = &m_valueProfiles[index];
536             ASSERT(result->m_bytecodeOffset != -1);
537             return result;
538         }
539         ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
540         {
541             ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
542             ASSERT(result->m_bytecodeOffset != -1);
543             ASSERT(!hasInstructions()
544                    || instructions()[bytecodeOffset + opcodeLength(
545                            m_globalData->interpreter->getOpcodeID(
546                                instructions()[
547                                    bytecodeOffset].u.opcode)) - 1].u.profile == result);
548             return result;
549         }
550         PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
551         {
552             return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
553         }
554         
555         unsigned totalNumberOfValueProfiles()
556         {
557             return numberOfArgumentValueProfiles() + numberOfValueProfiles();
558         }
559         ValueProfile* getFromAllValueProfiles(unsigned index)
560         {
561             if (index < numberOfArgumentValueProfiles())
562                 return valueProfileForArgument(index);
563             return valueProfile(index - numberOfArgumentValueProfiles());
564         }
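
        // getFromAllValueProfiles() exposes argument and bytecode value profiles as one
        // index space: indices [0, numberOfArgumentValueProfiles()) map to argument
        // profiles, the rest to bytecode value profiles. For example, with 3 argument
        // profiles, getFromAllValueProfiles(5) is equivalent to valueProfile(2).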
565         
566         RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
567         {
568             m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
569             return &m_rareCaseProfiles.last();
570         }
571         unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
572         RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
573         RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
574         {
575             return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
576         }
577         
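        // The slow-case heuristics below compare a per-site counter against both an
        // absolute minimum and a ratio of entries into this code block. With illustrative
        // (not actual) option values of minimumCount = 100 and threshold = 0.5, a site
        // whose slow path ran 120 times out of 200 entries (ratio 0.6) reports true,
        // while one that ran 120 times out of 1000 entries (ratio 0.12) does not.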
578         bool likelyToTakeSlowCase(int bytecodeOffset)
579         {
580             if (!numberOfRareCaseProfiles())
581                 return false;
582             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
583             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
584         }
585         
586         bool couldTakeSlowCase(int bytecodeOffset)
587         {
588             if (!numberOfRareCaseProfiles())
589                 return false;
590             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
591             return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
592         }
593         
594         RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
595         {
596             m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
597             return &m_specialFastCaseProfiles.last();
598         }
599         unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
600         RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
601         RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
602         {
603             return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
604         }
605         
606         bool likelyToTakeSpecialFastCase(int bytecodeOffset)
607         {
608             if (!numberOfRareCaseProfiles())
609                 return false;
610             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
611             return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
612         }
613         
614         bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
615         {
616             if (!numberOfRareCaseProfiles())
617                 return false;
618             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
619             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
620             unsigned value = slowCaseCount - specialFastCaseCount;
621             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
622         }
623         
624         bool likelyToTakeAnySlowCase(int bytecodeOffset)
625         {
626             if (!numberOfRareCaseProfiles())
627                 return false;
628             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
629             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
630             unsigned value = slowCaseCount + specialFastCaseCount;
631             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
632         }
633         
634         unsigned executionEntryCount() const { return m_executionEntryCount; }
635 #endif
636
637         unsigned globalResolveInfoCount() const
638         {
639 #if ENABLE(JIT)    
640             if (m_globalData->canUseJIT())
641                 return m_globalResolveInfos.size();
642 #endif
643             return 0;
644         }
645
646         // Exception handling support
647
648         size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
649         void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); m_rareData->m_exceptionHandlers.append(handler); }
650         HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
651
652         void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
653         {
654             createRareDataIfNecessary();
655             m_rareData->m_expressionInfo.append(expressionInfo);
656         }
657
658         void addLineInfo(unsigned bytecodeOffset, int lineNo)
659         {
660             createRareDataIfNecessary();
661             Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
662             if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
663                 LineInfo info = { bytecodeOffset, lineNo };
664                 lineInfo.append(info);
665             }
666         }
667
668         bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
669         bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
670         // We only generate exception handling info if the user is debugging
671         // (and may want line number info), or if the function contains exception handlers.
672         bool needsCallReturnIndices()
673         {
674             return m_rareData &&
675                 (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
676         }
677
678 #if ENABLE(JIT)
679         Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
680         {
681             createRareDataIfNecessary();
682             return m_rareData->m_callReturnIndexVector;
683         }
684 #endif
685
686 #if ENABLE(DFG_JIT)
687         SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
688         {
689             createRareDataIfNecessary();
690             return m_rareData->m_inlineCallFrames;
691         }
692         
693         Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
694         {
695             createRareDataIfNecessary();
696             return m_rareData->m_codeOrigins;
697         }
698         
699         // Having code origins implies that there has been some inlining.
700         bool hasCodeOrigins()
701         {
702             return m_rareData && !!m_rareData->m_codeOrigins.size();
703         }
704         
705         bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
706         {
707             if (!hasCodeOrigins())
708                 return false;
709             unsigned offset = getJITCode().offsetOf(returnAddress.value());
710             CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
711             if (entry->callReturnOffset != offset)
712                 return false;
713             codeOrigin = entry->codeOrigin;
714             return true;
715         }
716         
717         CodeOrigin codeOrigin(unsigned index)
718         {
719             ASSERT(m_rareData);
720             return m_rareData->m_codeOrigins[index].codeOrigin;
721         }
722         
723         bool addFrequentExitSite(const DFG::FrequentExitSite& site)
724         {
725             ASSERT(JITCode::isBaselineCode(getJITType()));
726             return m_exitProfile.add(site);
727         }
728
729         DFG::ExitProfile& exitProfile() { return m_exitProfile; }
730 #endif
731
732         // Constant Pool
733
734         size_t numberOfIdentifiers() const { return m_identifiers.size(); }
735         void addIdentifier(const Identifier& i) { m_identifiers.append(i); }
736         Identifier& identifier(int index) { return m_identifiers[index]; }
737
738         size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
739         unsigned addConstant(JSValue v)
740         {
741             unsigned result = m_constantRegisters.size();
742             m_constantRegisters.append(WriteBarrier<Unknown>());
743             m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
744             return result;
745         }
746         unsigned addOrFindConstant(JSValue);
747         WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
748         ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
749         ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
750
751         unsigned addFunctionDecl(FunctionExecutable* n)
752         {
753             unsigned size = m_functionDecls.size();
754             m_functionDecls.append(WriteBarrier<FunctionExecutable>());
755             m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
756             return size;
757         }
758         FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
759         int numberOfFunctionDecls() { return m_functionDecls.size(); }
760         unsigned addFunctionExpr(FunctionExecutable* n)
761         {
762             unsigned size = m_functionExprs.size();
763             m_functionExprs.append(WriteBarrier<FunctionExecutable>());
764             m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
765             return size;
766         }
767         FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
768
769         unsigned addRegExp(RegExp* r)
770         {
771             createRareDataIfNecessary();
772             unsigned size = m_rareData->m_regexps.size();
773             m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
774             return size;
775         }
776         unsigned numberOfRegExps() const
777         {
778             if (!m_rareData)
779                 return 0;
780             return m_rareData->m_regexps.size();
781         }
782         RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
783
784         unsigned addConstantBuffer(unsigned length)
785         {
786             createRareDataIfNecessary();
787             unsigned size = m_rareData->m_constantBuffers.size();
788             m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
789             return size;
790         }
791
792         JSValue* constantBuffer(unsigned index)
793         {
794             ASSERT(m_rareData);
795             return m_rareData->m_constantBuffers[index].data();
796         }
797
798         JSGlobalObject* globalObject() { return m_globalObject.get(); }
799         
800         JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
801         {
802             if (!codeOrigin.inlineCallFrame)
803                 return globalObject();
804             // FIXME: if we ever inline based on executable not function, this code will need to change.
805             return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
806         }
807
808         // Jump Tables
809
810         size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
811         SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
812         SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
813
814         size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
815         SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
816         SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
817
818         size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
819         StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
820         StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
821
822
823         SymbolTable* symbolTable() { return m_symbolTable; }
824         SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
825
826         EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
827
828         void shrinkToFit();
829         
830         void copyPostParseDataFrom(CodeBlock* alternative);
831         void copyPostParseDataFromAlternative();
832         
833         // Functions for controlling when JITting kicks in, in a mixed mode
834         // execution world.
835         
836         void dontJITAnytimeSoon()
837         {
838             m_llintExecuteCounter = Options::executionCounterValueForDontJITAnytimeSoon;
839         }
840         
841         void jitAfterWarmUp()
842         {
843             m_llintExecuteCounter = Options::executionCounterValueForJITAfterWarmUp;
844         }
845         
846         void jitSoon()
847         {
848             m_llintExecuteCounter = Options::executionCounterValueForJITSoon;
849         }
850         
851         int32_t llintExecuteCounter() const
852         {
853             return m_llintExecuteCounter;
854         }
855         
856         // Functions for controlling when tiered compilation kicks in. This
857         // controls both when the optimizing compiler is invoked and when OSR
858         // entry happens. Two triggers exist: the loop trigger and the return
859         // trigger. In either case, when an addition to m_jitExecuteCounter
860         // causes it to become non-negative, the optimizing compiler is
861         // invoked. This includes a fast check to see if this CodeBlock has
862         // already been optimized (i.e. replacement() returns a CodeBlock
863         // that was optimized with a higher tier JIT than this one). In the
864         // case of the loop trigger, if the optimized compilation succeeds
865         // (or has already succeeded in the past) then OSR is attempted to
866         // redirect program flow into the optimized code.
867         
868         // These functions are called from within the optimization triggers,
869         // and are used as a single point at which we define the heuristics
870         // for how much warm-up is mandated before the next optimization
871         // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
872         // as this is called from the CodeBlock constructor.
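
        // As an illustration (with a made-up starting value): if the counter is
        // initialized to -1000, each firing of a trigger adds some positive increment,
        // and once the accumulated additions reach 1000 the counter crosses from
        // negative to non-negative and the optimizing compiler is consulted.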
873         
874         // When we observe a lot of speculation failures, we trigger a
875         // reoptimization. But each time, we increase the optimization trigger
876         // to avoid thrashing.
877         unsigned reoptimizationRetryCounter() const
878         {
879             ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
880             return m_reoptimizationRetryCounter;
881         }
882         
883         void countReoptimization()
884         {
885             m_reoptimizationRetryCounter++;
886             if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
887                 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
888         }
889         
890         int32_t counterValueForOptimizeAfterWarmUp()
891         {
892             return Options::executionCounterValueForOptimizeAfterWarmUp << reoptimizationRetryCounter();
893         }
894         
895         int32_t counterValueForOptimizeAfterLongWarmUp()
896         {
897             return Options::executionCounterValueForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
898         }
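
        // Both starting values back off exponentially with the retry counter. For
        // example (illustrative base value): with a base of -1000 and a
        // reoptimizationRetryCounter() of 2, the counter is reinitialized to
        // -1000 << 2 = -4000, so roughly four times as much warm-up is required
        // before the next optimization attempt.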
899         
900         int32_t* addressOfJITExecuteCounter()
901         {
902             return &m_jitExecuteCounter;
903         }
904         
905         static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter); }
906
907         int32_t jitExecuteCounter() const { return m_jitExecuteCounter; }
908         
909         unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
910         
911         // Call this to force the next optimization trigger to fire. This is
912         // rarely wise, since optimization triggers are typically more
913         // expensive than executing baseline code.
914         void optimizeNextInvocation()
915         {
916             m_jitExecuteCounter = Options::executionCounterValueForOptimizeNextInvocation;
917         }
918         
919         // Call this to prevent optimization from happening again. Note that
920         // optimization will still happen after roughly 2^29 invocations,
921         // so this is really meant to delay that as much as possible. This
922         // is called if optimization failed, and we expect it to fail in
923         // the future as well.
924         void dontOptimizeAnytimeSoon()
925         {
926             m_jitExecuteCounter = Options::executionCounterValueForDontOptimizeAnytimeSoon;
927         }
928         
929         // Call this to reinitialize the counter to its starting state,
930         // forcing a warm-up to happen before the next optimization trigger
931         // fires. This is called in the CodeBlock constructor. It also
932         // makes sense to call this if an OSR exit occurred. Note that
933         // OSR exit code is generated code, so the value of the execute
934         // counter that this corresponds to is also available directly.
935         void optimizeAfterWarmUp()
936         {
937             m_jitExecuteCounter = counterValueForOptimizeAfterWarmUp();
938         }
939         
940         // Call this to force an optimization trigger to fire only after
941         // a lot of warm-up.
942         void optimizeAfterLongWarmUp()
943         {
944             m_jitExecuteCounter = counterValueForOptimizeAfterLongWarmUp();
945         }
946         
947         // Call this to cause an optimization trigger to fire soon, but
948         // not necessarily the next one. This makes sense if optimization
949         // succeeds. Successful optimization means that all calls are
950         // relinked to the optimized code, so this only affects call
951         // frames that are still executing this CodeBlock. The value here
952         // is tuned to strike a balance between the cost of OSR entry
953         // (which is too high to warrant making every loop back edge
954         // trigger OSR immediately) and the cost of executing baseline
955         // code (which is high enough that we don't necessarily want to
956         // have a full warm-up). The intuition for calling this instead of
957         // optimizeNextInvocation() is for the case of recursive functions
958         // with loops. Consider that there may be N call frames of some
959         // recursive function, for a reasonably large value of N. The top
960         // one triggers optimization, and then returns, and then all of
961         // the others return. We don't want optimization to be triggered on
962         // each return, as that would be superfluous. It only makes sense
963         // to trigger optimization if one of those functions becomes hot
964         // in the baseline code.
965         void optimizeSoon()
966         {
967             m_jitExecuteCounter = Options::executionCounterValueForOptimizeSoon << reoptimizationRetryCounter();
968         }
969         
970         // The speculative JIT tracks its success rate, so that we can
971         // decide when to reoptimize. It's interesting to note that these
972         // counters may overflow without any protection. The success
973         // counter will overflow before the fail one does, because the
974         // fail one is used as a trigger to reoptimize. So the worst case
975         // is that the success counter overflows and we reoptimize without
976         // needing to. But this is harmless. If a method really did
977         // execute 2^32 times then compiling it again probably won't hurt
978         // anyone.
979         
980         void countSpeculationSuccess()
981         {
982             m_speculativeSuccessCounter++;
983         }
984         
985         void countSpeculationFailure()
986         {
987             m_speculativeFailCounter++;
988         }
989         
990         uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
991         uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
992         
993         uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
994         uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
995         
996         static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
997         static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
998
999 #if ENABLE(JIT)
1000         // The number of failures that triggers the use of the ratio.
1001         unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
1002         unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
1003
1004         bool shouldReoptimizeNow()
1005         {
1006             return Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThreshold();
1007         }
1008
1009         bool shouldReoptimizeFromLoopNow()
1010         {
1011             return Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThresholdForLoop();
1012         }
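
        // Worked example with illustrative (not actual) option values: with a desired
        // success/fail ratio of 6 and a large-fail threshold of 500, a block with
        // 2000 failures against 10000 successes reoptimizes (6 * 2000 = 12000 >= 10000
        // and 2000 >= 500), whereas 100 failures against 10000 successes does not.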
1013 #endif
1014
1015 #if ENABLE(VALUE_PROFILER)
1016         bool shouldOptimizeNow();
1017 #else
1018         bool shouldOptimizeNow() { return false; }
1019 #endif
1020         
1021 #if ENABLE(JIT)
1022         void reoptimize()
1023         {
1024             ASSERT(replacement() != this);
1025             ASSERT(replacement()->alternative() == this);
1026             replacement()->tallyFrequentExitSites();
1027             replacement()->jettison();
1028             countReoptimization();
1029             optimizeAfterWarmUp();
1030         }
1031 #endif
1032
1033 #if ENABLE(VERBOSE_VALUE_PROFILE)
1034         void dumpValueProfiles();
1035 #endif
1036         
1037         // FIXME: Make these remaining members private.
1038
1039         int m_numCalleeRegisters;
1040         int m_numVars;
1041         int m_numCapturedVars;
1042         bool m_isConstructor;
1043
1044         // This is public because otherwise we would have many friends.
1045         bool m_shouldDiscardBytecode;
1046
1047     protected:
1048 #if ENABLE(JIT)
1049         virtual void jitCompileImpl(JSGlobalData&) = 0;
1050 #endif
1051         virtual void visitWeakReferences(SlotVisitor&);
1052         virtual void finalizeUnconditionally();
1053         
1054     private:
1055         friend class DFGCodeBlocks;
1056         
1057 #if ENABLE(DFG_JIT)
1058         void tallyFrequentExitSites();
1059 #else
1060         void tallyFrequentExitSites() { }
1061 #endif
1062         
1063         void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;
1064
1065         CString registerName(ExecState*, int r) const;
1066         void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1067         void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1068         void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
1069         void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1070         void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1071         void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1072         void visitStructures(SlotVisitor&, Instruction* vPC) const;
1073         
1074 #if ENABLE(DFG_JIT)
1075         bool shouldImmediatelyAssumeLivenessDuringScan()
1076         {
1077             // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
1078             // CodeBlocks don't need to be jettisoned when their weak references go
1079         // stale. So if a baseline JIT CodeBlock gets scanned, we can assume
1080         // that it is live.
1081             if (!m_dfgData)
1082                 return true;
1083             
1084             // For simplicity, we don't attempt to jettison code blocks during GC if
1085             // they are executing. Instead we strongly mark their weak references to
1086             // allow them to continue to execute soundly.
1087             if (m_dfgData->mayBeExecuting)
1088                 return true;
1089
1090             return false;
1091         }
1092 #else
1093         bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
1094 #endif
1095         
1096         void performTracingFixpointIteration(SlotVisitor&);
1097         
1098         void stronglyVisitStrongReferences(SlotVisitor&);
1099         void stronglyVisitWeakReferences(SlotVisitor&);
1100
1101         void createRareDataIfNecessary()
1102         {
1103             if (!m_rareData)
1104                 m_rareData = adoptPtr(new RareData);
1105         }
1106         
1107         int m_numParameters;
1108
1109         WriteBarrier<ScriptExecutable> m_ownerExecutable;
1110         JSGlobalData* m_globalData;
1111
1112         struct Instructions : public RefCounted<Instructions> {
1113             Vector<Instruction> m_instructions;
1114         };
1115         RefPtr<Instructions> m_instructions;
1116         unsigned m_instructionCount;
1117
1118         int m_thisRegister;
1119         int m_argumentsRegister;
1120         int m_activationRegister;
1121
1122         bool m_needsFullScopeChain;
1123         bool m_usesEval;
1124         bool m_isNumericCompareFunction;
1125         bool m_isStrictMode;
1126
1127         CodeType m_codeType;
1128
1129         RefPtr<SourceProvider> m_source;
1130         unsigned m_sourceOffset;
1131
1132         Vector<unsigned> m_propertyAccessInstructions;
1133         Vector<unsigned> m_globalResolveInstructions;
1134 #if ENABLE(LLINT)
1135         SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
1136         SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
1137 #endif
1138 #if ENABLE(JIT)
1139         Vector<StructureStubInfo> m_structureStubInfos;
1140         Vector<GlobalResolveInfo> m_globalResolveInfos;
1141         Vector<CallLinkInfo> m_callLinkInfos;
1142         Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
1143         JITCode m_jitCode;
1144         MacroAssemblerCodePtr m_jitCodeWithArityCheck;
1145         SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
1146 #endif
1147 #if ENABLE(DFG_JIT) || ENABLE(LLINT)
1148         OwnPtr<CompactJITCodeMap> m_jitCodeMap;
1149 #endif
1150 #if ENABLE(DFG_JIT)
1151         struct WeakReferenceTransition {
1152             WeakReferenceTransition() { }
1153             
1154             WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
1155                 : m_from(globalData, owner, from)
1156                 , m_to(globalData, owner, to)
1157             {
1158                 if (!!codeOrigin)
1159                     m_codeOrigin.set(globalData, owner, codeOrigin);
1160             }
1161
1162             WriteBarrier<JSCell> m_codeOrigin;
1163             WriteBarrier<JSCell> m_from;
1164             WriteBarrier<JSCell> m_to;
1165         };
1166         
1167         struct DFGData {
1168             DFGData()
1169                 : mayBeExecuting(false)
1170                 , isJettisoned(false)
1171             {
1172             }
1173             
1174             Vector<DFG::OSREntryData> osrEntry;
1175             SegmentedVector<DFG::OSRExit, 8> osrExit;
1176             Vector<DFG::SpeculationRecovery> speculationRecovery;
1177             Vector<WeakReferenceTransition> transitions;
1178             Vector<WriteBarrier<JSCell> > weakReferences;
1179             bool mayBeExecuting;
1180             bool isJettisoned;
1181             bool livenessHasBeenProved; // Initialized and used on every GC.
1182             bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
1183         };
1184         
1185         OwnPtr<DFGData> m_dfgData;
1186         
1187         // This is relevant to non-DFG code blocks that serve as the profiled code block
1188         // for DFG code blocks.
1189         DFG::ExitProfile m_exitProfile;
1190 #endif
1191 #if ENABLE(VALUE_PROFILER)
1192         Vector<ValueProfile> m_argumentValueProfiles;
1193         SegmentedVector<ValueProfile, 8> m_valueProfiles;
1194         SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
1195         SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
1196         unsigned m_executionEntryCount;
1197 #endif
1198
1199         Vector<unsigned> m_jumpTargets;
1200         Vector<unsigned> m_loopTargets;
1201
1202         // Constant Pool
1203         Vector<Identifier> m_identifiers;
1204         COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
1205         Vector<WriteBarrier<Unknown> > m_constantRegisters;
1206         Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
1207         Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
1208
1209         SymbolTable* m_symbolTable;
1210
1211         OwnPtr<CodeBlock> m_alternative;
1212         
1213         int32_t m_llintExecuteCounter;
1214         
1215         int32_t m_jitExecuteCounter;
1216         uint32_t m_speculativeSuccessCounter;
1217         uint32_t m_speculativeFailCounter;
1218         uint8_t m_optimizationDelayCounter;
1219         uint8_t m_reoptimizationRetryCounter;
1220         
1221         struct RareData {
1222            WTF_MAKE_FAST_ALLOCATED;
1223         public:
1224             Vector<HandlerInfo> m_exceptionHandlers;
1225
1226             // Rare Constants
1227             Vector<WriteBarrier<RegExp> > m_regexps;
1228
1229             // Buffers used for large array literals
1230             Vector<Vector<JSValue> > m_constantBuffers;
1231             
1232             // Jump Tables
1233             Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
1234             Vector<SimpleJumpTable> m_characterSwitchJumpTables;
1235             Vector<StringJumpTable> m_stringSwitchJumpTables;
1236
1237             EvalCodeCache m_evalCodeCache;
1238
1239             // Expression info - present if debugging.
1240             Vector<ExpressionRangeInfo> m_expressionInfo;
1241             // Line info - present if profiling or debugging.
1242             Vector<LineInfo> m_lineInfo;
1243 #if ENABLE(JIT)
1244             Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
1245 #endif
1246 #if ENABLE(DFG_JIT)
1247             SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
1248             Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
1249 #endif
1250         };
1251 #if COMPILER(MSVC)
1252         friend void WTF::deleteOwnedPtr<RareData>(RareData*);
1253 #endif
1254         OwnPtr<RareData> m_rareData;
1255 #if ENABLE(JIT)
1256         CompileWithDFGState m_canCompileWithDFGState;
1257 #endif
1258     };
1259
1260     // Program code is not marked by any function, so we make the global object
1261     // responsible for marking it.
1262
1263     class GlobalCodeBlock : public CodeBlock {
1264     protected:
1265         GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
1266             : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
1267             , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
1268         {
1269         }
1270         
1271         GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
1272             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
1273         {
1274         }
1275
1276     private:
1277         SymbolTable m_unsharedSymbolTable;
1278     };
1279
1280     class ProgramCodeBlock : public GlobalCodeBlock {
1281     public:
1282         ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
1283             : GlobalCodeBlock(CopyParsedBlock, other)
1284         {
1285         }
1286
1287         ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
1288             : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
1289         {
1290         }
1291         
1292 #if ENABLE(JIT)
1293     protected:
1294         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1295         virtual void jettison();
1296         virtual void jitCompileImpl(JSGlobalData&);
1297         virtual CodeBlock* replacement();
1298         virtual bool canCompileWithDFGInternal();
1299 #endif
1300     };
1301
1302     class EvalCodeBlock : public GlobalCodeBlock {
1303     public:
1304         EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
1305             : GlobalCodeBlock(CopyParsedBlock, other)
1306             , m_baseScopeDepth(other.m_baseScopeDepth)
1307             , m_variables(other.m_variables)
1308         {
1309         }
1310         
1311         EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
1312             : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
1313             , m_baseScopeDepth(baseScopeDepth)
1314         {
1315         }
1316
1317         int baseScopeDepth() const { return m_baseScopeDepth; }
1318
1319         const Identifier& variable(unsigned index) { return m_variables[index]; }
1320         unsigned numVariables() { return m_variables.size(); }
1321         void adoptVariables(Vector<Identifier>& variables)
1322         {
1323             ASSERT(m_variables.isEmpty());
1324             m_variables.swap(variables);
1325         }
1326         
1327 #if ENABLE(JIT)
1328     protected:
1329         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1330         virtual void jettison();
1331         virtual void jitCompileImpl(JSGlobalData&);
1332         virtual CodeBlock* replacement();
1333         virtual bool canCompileWithDFGInternal();
1334 #endif
1335
1336     private:
1337         int m_baseScopeDepth;
1338         Vector<Identifier> m_variables;
1339     };
1340
1341     class FunctionCodeBlock : public CodeBlock {
1342     public:
1343         FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
1344             : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
1345         {
1346             // The fact that we have to do this is yucky, but is necessary because of the
1347             // class hierarchy issues described in the comment block for the main
1348             // constructor, below.
1349             sharedSymbolTable()->ref();
1350         }
1351
1352         // Rather than using the usual RefCounted::create idiom and holding a RefPtr to the
1353         // SharedSymbolTable, we leak a reference from create(): we need to initialise the
1354         // CodeBlock before we can initialise any RefPtr to hold the shared symbol table, so we
1355         // pass it as a raw pointer with a ref count of 1 and manually deref in the destructor.
1356         FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
1357             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
1358         {
1359         }
1360         ~FunctionCodeBlock()
1361         {
1362             sharedSymbolTable()->deref();
1363         }
1364         
1365 #if ENABLE(JIT)
1366     protected:
1367         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1368         virtual void jettison();
1369         virtual void jitCompileImpl(JSGlobalData&);
1370         virtual CodeBlock* replacement();
1371         virtual bool canCompileWithDFGInternal();
1372 #endif
1373     };
1374
1375     // Use this if you want to copy a code block and you're paranoid about a GC
1376     // discarding its bytecode while you do it.
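    //
    // A minimal usage sketch (illustrative only; copyForOptimization is a hypothetical
    // caller, not part of this header):
    //
    //     PassOwnPtr<CodeBlock> copyForOptimization(FunctionCodeBlock& original)
    //     {
    //         BytecodeDestructionBlocker blocker(&original);
    //         // While 'blocker' is alive, 'original' cannot have its bytecode discarded.
    //         return adoptPtr(new FunctionCodeBlock(CodeBlock::CopyParsedBlock, original));
    //     }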
1377     class BytecodeDestructionBlocker {
1378     public:
1379         BytecodeDestructionBlocker(CodeBlock* codeBlock)
1380             : m_codeBlock(codeBlock)
1381             , m_oldValueOfShouldDiscardBytecode(codeBlock->m_shouldDiscardBytecode)
1382         {
1383             codeBlock->m_shouldDiscardBytecode = false;
1384         }
1385         
1386         ~BytecodeDestructionBlocker()
1387         {
1388             m_codeBlock->m_shouldDiscardBytecode = m_oldValueOfShouldDiscardBytecode;
1389         }
1390         
1391     private:
1392         CodeBlock* m_codeBlock;
1393         bool m_oldValueOfShouldDiscardBytecode;
1394     };
1395
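    // Maps a CodeOrigin observed in optimized code back to the baseline code block that
    // profiled it: if the origin lies in an inlined call frame, the answer is the inlined
    // executable's baseline code block; otherwise it is the caller-supplied baseline code
    // block of the machine code block itself.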
1396     inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
1397     {
1398         if (codeOrigin.inlineCallFrame) {
1399             ExecutableBase* executable = codeOrigin.inlineCallFrame->executable.get();
1400             ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
1401             return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
1402         }
1403         return baselineCodeBlock;
1404     }
1405     
1406
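    // ExecState::r() resolves a virtual register index to a Register slot: constant
    // register indices are redirected into the CodeBlock's constant pool, while all other
    // indices address the call frame directly. uncheckedR() skips the constant check and
    // must only be used with indices known not to name constants.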
1407     inline Register& ExecState::r(int index)
1408     {
1409         CodeBlock* codeBlock = this->codeBlock();
1410         if (codeBlock->isConstantRegisterIndex(index))
1411             return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
1412         return this[index];
1413     }
1414
1415     inline Register& ExecState::uncheckedR(int index)
1416     {
1417         ASSERT(index < FirstConstantRegisterIndex);
1418         return this[index];
1419     }
1420
1421 #if ENABLE(DFG_JIT)
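    // Only DFG-compiled frames can contain inlined call frames, so the common case
    // (no code block, or non-DFG code) answers false without taking the slow path.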
1422     inline bool ExecState::isInlineCallFrame()
1423     {
1424         if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
1425             return false;
1426         return isInlineCallFrameSlow();
1427     }
1428 #endif
1429
1430 #if ENABLE(DFG_JIT)
1431     inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
1432     {
1433         // We have to check for 0 and -1 because those are used by the HashMap as markers.
1434         uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
1435         
1436         // This checks for both of those nasty cases in one go: value is unsigned, so adding
1437         // one wraps -1 around to zero (0 + 1 = 1, -1 + 1 = 0). Hence value + 1 <= 1 is true
1438         // exactly when the candidate is one of the two marker values.
1439         if (value + 1 <= 1)
1440             return;
1441         
1442         HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
1443         if (iter == m_set.end())
1444             return;
1445         
1446         (*iter)->m_dfgData->mayBeExecuting = true;
1447     }
1448 #endif
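    // The sentinel check in DFGCodeBlocks::mark() above can be read in isolation as the
    // following sketch (hypothetical standalone helper, not part of this header):
    //
    //     static bool isHashTableMarkerValue(void* candidate)
    //     {
    //         uintptr_t value = reinterpret_cast<uintptr_t>(candidate);
    //         return value + 1 <= 1; // true only for 0 and for the all-ones pointer (-1)
    //     }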
1449     
1450 } // namespace JSC
1451
1452 #endif // CodeBlock_h