1 /*
2  * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
3  * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1.  Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  * 2.  Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
15  *     its contributors may be used to endorse or promote products derived
16  *     from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 #ifndef CodeBlock_h
31 #define CodeBlock_h
32
33 #include "CallLinkInfo.h"
34 #include "CallReturnOffsetToBytecodeOffset.h"
35 #include "CodeOrigin.h"
36 #include "CodeType.h"
37 #include "CompactJITCodeMap.h"
38 #include "DFGCodeBlocks.h"
39 #include "DFGExitProfile.h"
40 #include "DFGOSREntry.h"
41 #include "DFGOSRExit.h"
42 #include "EvalCodeCache.h"
43 #include "ExpressionRangeInfo.h"
44 #include "GlobalResolveInfo.h"
45 #include "HandlerInfo.h"
46 #include "MethodCallLinkInfo.h"
47 #include "Options.h"
48 #include "Instruction.h"
49 #include "JITCode.h"
50 #include "JITWriteBarrier.h"
51 #include "JSGlobalObject.h"
52 #include "JumpTable.h"
53 #include "LineInfo.h"
54 #include "Nodes.h"
55 #include "PredictionTracker.h"
56 #include "RegExpObject.h"
57 #include "StructureStubInfo.h"
58 #include "UString.h"
59 #include "UnconditionalFinalizer.h"
60 #include "ValueProfile.h"
61 #include <wtf/FastAllocBase.h>
62 #include <wtf/PassOwnPtr.h>
63 #include <wtf/RefPtr.h>
64 #include <wtf/SegmentedVector.h>
65 #include <wtf/Vector.h>
67
68 // Register numbers used in bytecode operations have different meaning according to their ranges:
69 //      0x80000000-0xFFFFFFFF  Negative indices from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
70 //      0x00000000-0x3FFFFFFF  Forwards indices from the CallFrame pointer are local vars and temporaries within the function's callframe.
71 //      0x40000000-0x7FFFFFFF  Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
72 static const int FirstConstantRegisterIndex = 0x40000000;
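// For example (see CodeBlock::getConstant() below), an operand of 0x40000002
// refers to m_constantRegisters[0x40000002 - FirstConstantRegisterIndex],
// i.e. slot 2 of that CodeBlock's constant pool.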
73
74 namespace JSC {
75
76     class ExecState;
77     class DFGCodeBlocks;
78
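    // When a function uses 'arguments', the register immediately before the
    // arguments register holds the original, unmodified arguments value,
    // hence the -1 below.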
79     inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
80
81     static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
82
83     class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
84         WTF_MAKE_FAST_ALLOCATED;
85         friend class JIT;
86     public:
87         enum CopyParsedBlockTag { CopyParsedBlock };
88     protected:
89         CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable*);
90         
91         CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);
92
93         WriteBarrier<JSGlobalObject> m_globalObject;
94         Heap* m_heap;
95
96     public:
97         JS_EXPORT_PRIVATE virtual ~CodeBlock();
98         
99         int numParameters() const { return m_numParameters; }
100         void setNumParameters(int newValue);
101         void addParameter();
102         
103         int* addressOfNumParameters() { return &m_numParameters; }
104         static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
105
106         CodeBlock* alternative() { return m_alternative.get(); }
107         PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
108         void setAlternative(PassOwnPtr<CodeBlock> alternative) { m_alternative = alternative; }
109         
110         CodeSpecializationKind specializationKind()
111         {
112             if (m_isConstructor)
113                 return CodeForConstruct;
114             return CodeForCall;
115         }
116         
117 #if ENABLE(JIT)
118         CodeBlock* baselineVersion()
119         {
120             CodeBlock* result = replacement();
121             if (!result)
122                 return 0; // This can happen if we're in the process of creating the baseline version.
123             while (result->alternative())
124                 result = result->alternative();
125             ASSERT(result);
126             ASSERT(result->getJITType() == JITCode::BaselineJIT);
127             return result;
128         }
129 #endif
130         
131         bool canProduceCopyWithBytecode() { return hasInstructions(); }
132
133         void visitAggregate(SlotVisitor&);
134
135         static void dumpStatistics();
136
137         void dump(ExecState*) const;
138         void printStructures(const Instruction*) const;
139         void printStructure(const char* name, const Instruction*, int operand) const;
140
141         bool isStrictMode() const { return m_isStrictMode; }
142
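        // A value is known not to be an immediate if it is the 'this' value in
        // sloppy (non-strict) code, where 'this' has already been converted to an
        // object, or if it is a constant whose value is a cell.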
143         inline bool isKnownNotImmediate(int index)
144         {
145             if (index == m_thisRegister && !m_isStrictMode)
146                 return true;
147
148             if (isConstantRegisterIndex(index))
149                 return getConstant(index).isCell();
150
151             return false;
152         }
153
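        // Named locals occupy register indices [0, m_numVars); anything at or
        // above m_numVars is a temporary.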
154         ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
155         {
156             return index >= m_numVars;
157         }
158
159         HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
160         int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
161         void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);
162
163 #if ENABLE(JIT)
164
165         StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
166         {
167             return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
168         }
169
170         StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
171         {
172             return *(binarySearch<StructureStubInfo, unsigned, getStructureStubInfoBytecodeIndex>(m_structureStubInfos.begin(), m_structureStubInfos.size(), bytecodeIndex));
173         }
174
175         CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
176         {
177             return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
178         }
179         
180         CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
181         {
182             return *(binarySearch<CallLinkInfo, unsigned, getCallLinkInfoBytecodeIndex>(m_callLinkInfos.begin(), m_callLinkInfos.size(), bytecodeIndex));
183         }
184
185         MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
186         {
187             return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
188         }
189
190         MethodCallLinkInfo& getMethodCallLinkInfo(unsigned bytecodeIndex)
191         {
192             return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
193         }
194
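        // Maps a JIT return address back to the bytecode offset of the call that
        // produced it by binary-searching the sorted call return index vector.
        // Falls back to bytecode offset 1 when no index data has been recorded.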
195         unsigned bytecodeOffset(ReturnAddressPtr returnAddress)
196         {
197             if (!m_rareData)
198                 return 1;
199             Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
200             if (!callIndices.size())
201                 return 1;
202             return binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), getJITCode().offsetOf(returnAddress.value()))->bytecodeOffset;
203         }
204
205         unsigned bytecodeOffsetForCallAtIndex(unsigned index)
206         {
207             if (!m_rareData)
208                 return 1;
209             Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
210             if (!callIndices.size())
211                 return 1;
212             ASSERT(index < m_rareData->m_callReturnIndexVector.size());
213             return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
214         }
215
216         void unlinkCalls();
217         
218         bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }
219         
220         void linkIncomingCall(CallLinkInfo* incoming)
221         {
222             m_incomingCalls.push(incoming);
223         }
224         
225         void unlinkIncomingCalls();
226 #endif
227
228 #if ENABLE(DFG_JIT)
229         void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
230         {
231             m_jitCodeMap = jitCodeMap;
232         }
233         CompactJITCodeMap* jitCodeMap()
234         {
235             return m_jitCodeMap.get();
236         }
237         
238         void createDFGDataIfNecessary()
239         {
240             if (!!m_dfgData)
241                 return;
242             
243             m_dfgData = adoptPtr(new DFGData);
244         }
245         
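        // Records the machine-code offset at which optimized code can be entered
        // via OSR for the given bytecode index; dfgOSREntryDataForBytecodeIndex()
        // performs the reverse lookup.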
246         DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
247         {
248             createDFGDataIfNecessary();
249             DFG::OSREntryData entry;
250             entry.m_bytecodeIndex = bytecodeIndex;
251             entry.m_machineCodeOffset = machineCodeOffset;
252             m_dfgData->osrEntry.append(entry);
253             return &m_dfgData->osrEntry.last();
254         }
255         unsigned numberOfDFGOSREntries() const
256         {
257             if (!m_dfgData)
258                 return 0;
259             return m_dfgData->osrEntry.size();
260         }
261         DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgData->osrEntry[i]; }
262         DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
263         {
264             return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgData->osrEntry.begin(), m_dfgData->osrEntry.size(), bytecodeIndex);
265         }
266         
267         void appendOSRExit(const DFG::OSRExit& osrExit)
268         {
269             createDFGDataIfNecessary();
270             m_dfgData->osrExit.append(osrExit);
271         }
272         
273         DFG::OSRExit& lastOSRExit()
274         {
275             return m_dfgData->osrExit.last();
276         }
277         
278         void appendSpeculationRecovery(const DFG::SpeculationRecovery& recovery)
279         {
280             createDFGDataIfNecessary();
281             m_dfgData->speculationRecovery.append(recovery);
282         }
283         
284         unsigned numberOfOSRExits()
285         {
286             if (!m_dfgData)
287                 return 0;
288             return m_dfgData->osrExit.size();
289         }
290         
291         unsigned numberOfSpeculationRecoveries()
292         {
293             if (!m_dfgData)
294                 return 0;
295             return m_dfgData->speculationRecovery.size();
296         }
297         
298         DFG::OSRExit& osrExit(unsigned index)
299         {
300             return m_dfgData->osrExit[index];
301         }
302         
303         DFG::SpeculationRecovery& speculationRecovery(unsigned index)
304         {
305             return m_dfgData->speculationRecovery[index];
306         }
307         
308         void appendWeakReference(JSCell* target)
309         {
310             createDFGDataIfNecessary();
311             m_dfgData->weakReferences.append(WriteBarrier<JSCell>(*globalData(), ownerExecutable(), target));
312         }
313         
314         void shrinkWeakReferencesToFit()
315         {
316             if (!m_dfgData)
317                 return;
318             m_dfgData->weakReferences.shrinkToFit();
319         }
320         
321         void appendWeakReferenceTransition(JSCell* codeOrigin, JSCell* from, JSCell* to)
322         {
323             createDFGDataIfNecessary();
324             m_dfgData->transitions.append(
325                 WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to));
326         }
327         
328         void shrinkWeakReferenceTransitionsToFit()
329         {
330             if (!m_dfgData)
331                 return;
332             m_dfgData->transitions.shrinkToFit();
333         }
334 #endif
335
336 #if ENABLE(CLASSIC_INTERPRETER)
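        // In the classic interpreter the "return address" is itself a pointer into
        // the instruction stream, so the bytecode offset is plain pointer arithmetic.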
337         unsigned bytecodeOffset(Instruction* returnAddress)
338         {
339             return static_cast<Instruction*>(returnAddress) - instructions().begin();
340         }
341 #endif
342
343         void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
344         bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
345
346         bool hasInstructions() const { return !!m_instructions; }
347         unsigned numberOfInstructions() const { return !m_instructions ? 0 : m_instructions->m_instructions.size(); }
348         Vector<Instruction>& instructions() { return m_instructions->m_instructions; }
349         const Vector<Instruction>& instructions() const { return m_instructions->m_instructions; }
350         void discardBytecode() { m_instructions.clear(); }
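        // Defers releasing the instruction stream: only sets m_shouldDiscardBytecode
        // rather than clearing m_instructions immediately.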
351         void discardBytecodeLater()
352         {
353             m_shouldDiscardBytecode = true;
354         }
355         
356         bool usesOpcode(OpcodeID);
357
358         unsigned instructionCount() { return m_instructionCount; }
359         void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; }
360
361 #if ENABLE(JIT)
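        // Installing DFG-compiled code also registers this CodeBlock with the
        // heap's DFGCodeBlocks set so that the DFG-specific GC machinery can
        // track it.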
362         void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
363         {
364             m_jitCode = code;
365             m_jitCodeWithArityCheck = codeWithArityCheck;
366 #if ENABLE(DFG_JIT)
367             if (m_jitCode.jitType() == JITCode::DFGJIT) {
368                 createDFGDataIfNecessary();
369                 m_globalData->heap.m_dfgCodeBlocks.m_set.add(this);
370             }
371 #endif
372         }
373         JITCode& getJITCode() { return m_jitCode; }
374         MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
375         JITCode::JITType getJITType() { return m_jitCode.jitType(); }
376         ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
377         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
378         virtual void jettison() = 0;
379         virtual CodeBlock* replacement() = 0;
380
381         enum CompileWithDFGState {
382             CompileWithDFGFalse,
383             CompileWithDFGTrue,
384             CompileWithDFGUnset
385         };
386
387         virtual bool canCompileWithDFGInternal() = 0;
388         bool canCompileWithDFG()
389         {
390             bool result = canCompileWithDFGInternal();
391             m_canCompileWithDFGState = result ? CompileWithDFGTrue : CompileWithDFGFalse;
392             return result;
393         }
394         CompileWithDFGState canCompileWithDFGState() { return m_canCompileWithDFGState; }
395
396         bool hasOptimizedReplacement()
397         {
398             ASSERT(getJITType() == JITCode::BaselineJIT);
399             bool result = replacement()->getJITType() > getJITType();
400 #if !ASSERT_DISABLED
401             if (result)
402                 ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
403             else {
404                 ASSERT(replacement()->getJITType() == JITCode::BaselineJIT);
405                 ASSERT(replacement() == this);
406             }
407 #endif
408             return result;
409         }
410 #else
411         JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
412 #endif
413
414         ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
415
416         void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
417         JSGlobalData* globalData() { return m_globalData; }
418
419         void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
420         int thisRegister() const { return m_thisRegister; }
421
422         void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
423         bool needsFullScopeChain() const { return m_needsFullScopeChain; }
424         void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
425         bool usesEval() const { return m_usesEval; }
426         
427         void setArgumentsRegister(int argumentsRegister)
428         {
429             ASSERT(argumentsRegister != -1);
430             m_argumentsRegister = argumentsRegister;
431             ASSERT(usesArguments());
432         }
433         int argumentsRegister()
434         {
435             ASSERT(usesArguments());
436             return m_argumentsRegister;
437         }
438         void setActivationRegister(int activationRegister)
439         {
440             m_activationRegister = activationRegister;
441         }
442         int activationRegister()
443         {
444             ASSERT(needsFullScopeChain());
445             return m_activationRegister;
446         }
447         bool usesArguments() const { return m_argumentsRegister != -1; }
448
449         CodeType codeType() const { return m_codeType; }
450
451         SourceProvider* source() const { return m_source.get(); }
452         unsigned sourceOffset() const { return m_sourceOffset; }
453
454         size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
455         void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
456         unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
457         unsigned lastJumpTarget() const { return m_jumpTargets.last(); }
458
459         void createActivation(CallFrame*);
460
461         void clearEvalCache();
462
463 #if ENABLE(CLASSIC_INTERPRETER)
464         void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
465         {
466             if (!m_globalData->canUseJIT())
467                 m_propertyAccessInstructions.append(propertyAccessInstruction);
468         }
469         void addGlobalResolveInstruction(unsigned globalResolveInstruction)
470         {
471             if (!m_globalData->canUseJIT())
472                 m_globalResolveInstructions.append(globalResolveInstruction);
473         }
474         bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
475 #endif
476 #if ENABLE(JIT)
477         void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
478         size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
479         StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
480
481         void addGlobalResolveInfo(unsigned globalResolveInstruction)
482         {
483             if (m_globalData->canUseJIT())
484                 m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
485         }
486         GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
487         bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
488
489         void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
490         size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
491         CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
492
493         void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
494         MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
495 #endif
496         
497 #if ENABLE(VALUE_PROFILER)
498         unsigned numberOfArgumentValueProfiles()
499         {
500             ASSERT(m_numParameters >= 0);
501             ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
502             return m_argumentValueProfiles.size();
503         }
504         ValueProfile* valueProfileForArgument(unsigned argumentIndex)
505         {
506             ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
507             ASSERT(result->m_bytecodeOffset == -1);
508             return result;
509         }
510         
511         ValueProfile* addValueProfile(int bytecodeOffset)
512         {
513             ASSERT(bytecodeOffset != -1);
514             ASSERT(m_valueProfiles.isEmpty() || m_valueProfiles.last().m_bytecodeOffset < bytecodeOffset);
515             m_valueProfiles.append(ValueProfile(bytecodeOffset));
516             return &m_valueProfiles.last();
517         }
518         unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
519         ValueProfile* valueProfile(int index)
520         {
521             ValueProfile* result = &m_valueProfiles[index];
522             ASSERT(result->m_bytecodeOffset != -1);
523             return result;
524         }
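        // The ASSERT below cross-checks the binary search: each profiled opcode
        // stores a pointer to its ValueProfile in its final operand slot.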
525         ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
526         {
527             ValueProfile* result = WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
528             ASSERT(result->m_bytecodeOffset != -1);
529             ASSERT(!hasInstructions()
530                    || instructions()[bytecodeOffset + opcodeLength(
531                            m_globalData->interpreter->getOpcodeID(
532                                instructions()[
533                                    bytecodeOffset].u.opcode)) - 1].u.profile == result);
534             return result;
535         }
536         
537         unsigned totalNumberOfValueProfiles()
538         {
539             return numberOfArgumentValueProfiles() + numberOfValueProfiles();
540         }
541         ValueProfile* getFromAllValueProfiles(unsigned index)
542         {
543             if (index < numberOfArgumentValueProfiles())
544                 return valueProfileForArgument(index);
545             return valueProfile(index - numberOfArgumentValueProfiles());
546         }
547         
548         RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
549         {
550             m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
551             return &m_rareCaseProfiles.last();
552         }
553         unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
554         RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
555         RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
556         {
557             return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset);
558         }
559         
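        // The heuristics below require both an absolute minimum number of slow-path
        // executions and a minimum ratio of slow-path executions to the block's
        // total execution entries; both thresholds come from Options.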
560         bool likelyToTakeSlowCase(int bytecodeOffset)
561         {
562             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
563             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
564         }
565         
566         bool couldTakeSlowCase(int bytecodeOffset)
567         {
568             unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
569             return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
570         }
571         
572         RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
573         {
574             m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
575             return &m_specialFastCaseProfiles.last();
576         }
577         unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
578         RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
579         RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
580         {
581             return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset);
582         }
583         
584         bool likelyToTakeSpecialFastCase(int bytecodeOffset)
585         {
586             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
587             return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
588         }
589         
590         bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
591         {
592             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
593             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
594             unsigned value = slowCaseCount - specialFastCaseCount;
595             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
596         }
597         
598         bool likelyToTakeAnySlowCase(int bytecodeOffset)
599         {
600             unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
601             unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
602             unsigned value = slowCaseCount + specialFastCaseCount;
603             return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
604         }
605         
606         unsigned executionEntryCount() const { return m_executionEntryCount; }
607 #endif
608
609         unsigned globalResolveInfoCount() const
610         {
611 #if ENABLE(JIT)    
612             if (m_globalData->canUseJIT())
613                 return m_globalResolveInfos.size();
614 #endif
615             return 0;
616         }
617
618         // Exception handling support
619
620         size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
621         void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(handler); }
622         HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
623
624         void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
625         {
626             createRareDataIfNecessary();
627             m_rareData->m_expressionInfo.append(expressionInfo);
628         }
629
630         void addLineInfo(unsigned bytecodeOffset, int lineNo)
631         {
632             createRareDataIfNecessary();
633             Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
634             if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
635                 LineInfo info = { bytecodeOffset, lineNo };
636                 lineInfo.append(info);
637             }
638         }
639
640         bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
641         bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
642         // We only generate exception handling info if the user is debugging
643         // (and may want line number info), or if the function contains an exception handler.
644         bool needsCallReturnIndices()
645         {
646             return m_rareData &&
647                 (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
648         }
649
650 #if ENABLE(JIT)
651         Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
652         {
653             createRareDataIfNecessary();
654             return m_rareData->m_callReturnIndexVector;
655         }
656 #endif
657
658 #if ENABLE(DFG_JIT)
659         SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
660         {
661             createRareDataIfNecessary();
662             return m_rareData->m_inlineCallFrames;
663         }
664         
665         Vector<CodeOriginAtCallReturnOffset>& codeOrigins()
666         {
667             createRareDataIfNecessary();
668             return m_rareData->m_codeOrigins;
669         }
670         
671         // Having code origins implies that there has been some inlining.
672         bool hasCodeOrigins()
673         {
674             return m_rareData && !!m_rareData->m_codeOrigins.size();
675         }
676         
677         bool codeOriginForReturn(ReturnAddressPtr returnAddress, CodeOrigin& codeOrigin)
678         {
679             if (!hasCodeOrigins())
680                 return false;
681             unsigned offset = getJITCode().offsetOf(returnAddress.value());
682             CodeOriginAtCallReturnOffset* entry = binarySearch<CodeOriginAtCallReturnOffset, unsigned, getCallReturnOffsetForCodeOrigin>(codeOrigins().begin(), codeOrigins().size(), offset, WTF::KeyMustNotBePresentInArray);
683             if (entry->callReturnOffset != offset)
684                 return false;
685             codeOrigin = entry->codeOrigin;
686             return true;
687         }
688         
689         CodeOrigin codeOrigin(unsigned index)
690         {
691             ASSERT(m_rareData);
692             return m_rareData->m_codeOrigins[index].codeOrigin;
693         }
694         
695         bool addFrequentExitSite(const DFG::FrequentExitSite& site)
696         {
697             ASSERT(getJITType() == JITCode::BaselineJIT);
698             return m_exitProfile.add(site);
699         }
700
701         DFG::ExitProfile& exitProfile() { return m_exitProfile; }
702 #endif
703
704         // Constant Pool
705
706         size_t numberOfIdentifiers() const { return m_identifiers.size(); }
707         void addIdentifier(const Identifier& i) { return m_identifiers.append(i); }
708         Identifier& identifier(int index) { return m_identifiers[index]; }
709
710         size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
711         unsigned addConstant(JSValue v)
712         {
713             unsigned result = m_constantRegisters.size();
714             m_constantRegisters.append(WriteBarrier<Unknown>());
715             m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
716             return result;
717         }
718         unsigned addOrFindConstant(JSValue);
719         WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
720         ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
721         ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
722
723         unsigned addFunctionDecl(FunctionExecutable* n)
724         {
725             unsigned size = m_functionDecls.size();
726             m_functionDecls.append(WriteBarrier<FunctionExecutable>());
727             m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
728             return size;
729         }
730         FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
731         int numberOfFunctionDecls() { return m_functionDecls.size(); }
732         unsigned addFunctionExpr(FunctionExecutable* n)
733         {
734             unsigned size = m_functionExprs.size();
735             m_functionExprs.append(WriteBarrier<FunctionExecutable>());
736             m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
737             return size;
738         }
739         FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
740
741         unsigned addRegExp(RegExp* r)
742         {
743             createRareDataIfNecessary();
744             unsigned size = m_rareData->m_regexps.size();
745             m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
746             return size;
747         }
748         unsigned numberOfRegExps() const
749         {
750             if (!m_rareData)
751                 return 0;
752             return m_rareData->m_regexps.size();
753         }
754         RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
755
756         unsigned addConstantBuffer(unsigned length)
757         {
758             createRareDataIfNecessary();
759             unsigned size = m_rareData->m_constantBuffers.size();
760             m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
761             return size;
762         }
763
764         JSValue* constantBuffer(unsigned index)
765         {
766             ASSERT(m_rareData);
767             return m_rareData->m_constantBuffers[index].data();
768         }
769
770         JSGlobalObject* globalObject() { return m_globalObject.get(); }
771         
772         JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
773         {
774             if (!codeOrigin.inlineCallFrame)
775                 return globalObject();
776             // FIXME: If we ever inline based on the executable rather than the function, this code will need to change.
777             return codeOrigin.inlineCallFrame->callee->scope()->globalObject.get();
778         }
779
780         // Jump Tables
781
782         size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
783         SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
784         SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
785
786         size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
787         SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
788         SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
789
790         size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
791         StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
792         StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
793
794
795         SymbolTable* symbolTable() { return m_symbolTable; }
796         SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
797
798         EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
799
800         void shrinkToFit();
801         
802         void copyPostParseDataFrom(CodeBlock* alternative);
803         void copyPostParseDataFromAlternative();
804         
805         // Functions for controlling when tiered compilation kicks in. This
806         // controls both when the optimizing compiler is invoked and when OSR
807         // entry happens. Two triggers exist: the loop trigger and the return
808         // trigger. In either case, when an addition to m_jitExecuteCounter
809         // causes it to become non-negative, the optimizing compiler is
810         // invoked. This includes a fast check to see if this CodeBlock has
811         // already been optimized (i.e. replacement() returns a CodeBlock
812         // that was optimized with a higher tier JIT than this one). In the
813         // case of the loop trigger, if the optimized compilation succeeds
814         // (or has already succeeded in the past) then OSR is attempted to
815         // redirect program flow into the optimized code.
816         
817         // These functions are called from within the optimization triggers,
818         // and are used as a single point at which we define the heuristics
819         // for how much warm-up is mandated before the next optimization
820         // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
821         // as this is called from the CodeBlock constructor.
822         
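        //
        // A minimal sketch of the trigger check, for orientation only (this is not
        // the code the JITs actually emit, and considerOptimizing() is a
        // hypothetical helper name):
        //
        //     m_jitExecuteCounter += increment;  // counter starts out negative
        //     if (m_jitExecuteCounter >= 0)
        //         considerOptimizing();          // may compile and/or OSR-enter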
823         // When we observe a lot of speculation failures, we trigger a
824         // reoptimization. But each time, we increase the optimization trigger
825         // to avoid thrashing.
826         unsigned reoptimizationRetryCounter() const
827         {
828             ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax);
829             return m_reoptimizationRetryCounter;
830         }
831         
832         void countReoptimization()
833         {
834             m_reoptimizationRetryCounter++;
835             if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax)
836                 m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax;
837         }
838         
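        // Each reoptimization retry left-shifts the base counter value, so every
        // retry roughly doubles the warm-up required before the next attempt.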
839         int32_t counterValueForOptimizeAfterWarmUp()
840         {
841             return Options::executionCounterValueForOptimizeAfterWarmUp << reoptimizationRetryCounter();
842         }
843         
844         int32_t counterValueForOptimizeAfterLongWarmUp()
845         {
846             return Options::executionCounterValueForOptimizeAfterLongWarmUp << reoptimizationRetryCounter();
847         }
848         
849         int32_t* addressOfJITExecuteCounter()
850         {
851             return &m_jitExecuteCounter;
852         }
853         
854         static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter); }
855
856         int32_t jitExecuteCounter() const { return m_jitExecuteCounter; }
857         
858         unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
859         
860         // Call this to force the next optimization trigger to fire. This is
861         // rarely wise, since optimization triggers are typically more
862         // expensive than executing baseline code.
863         void optimizeNextInvocation()
864         {
865             m_jitExecuteCounter = Options::executionCounterValueForOptimizeNextInvocation;
866         }
867         
868         // Call this to prevent optimization from happening again. Note that
869         // optimization will still happen after roughly 2^29 invocations,
870         // so this is really meant to delay that as much as possible. This
871         // is called if optimization failed, and we expect it to fail in
872         // the future as well.
873         void dontOptimizeAnytimeSoon()
874         {
875             m_jitExecuteCounter = Options::executionCounterValueForDontOptimizeAnytimeSoon;
876         }
877         
878         // Call this to reinitialize the counter to its starting state,
879         // forcing a warm-up to happen before the next optimization trigger
880         // fires. This is called in the CodeBlock constructor. It also
881         // makes sense to call this if an OSR exit occurred. Note that
882         // OSR exit code is code generated, so the value of the execute
883         // counter that this corresponds to is also available directly.
884         void optimizeAfterWarmUp()
885         {
886             m_jitExecuteCounter = counterValueForOptimizeAfterWarmUp();
887         }
888         
889         // Call this to force an optimization trigger to fire only after
890         // a lot of warm-up.
891         void optimizeAfterLongWarmUp()
892         {
893             m_jitExecuteCounter = counterValueForOptimizeAfterLongWarmUp();
894         }
895         
896         // Call this to cause an optimization trigger to fire soon, but
897         // not necessarily the next one. This makes sense if optimization
898         // succeeds. Successful optimization means that all calls are
899         // relinked to the optimized code, so this only affects call
900         // frames that are still executing this CodeBlock. The value here
901         // is tuned to strike a balance between the cost of OSR entry
902         // (which is too high to warrant making every loop back edge
903         // trigger OSR immediately) and the cost of executing baseline
904         // code (which is high enough that we don't necessarily want to
905         // have a full warm-up). The intuition for calling this instead of
906         // optimizeNextInvocation() is for the case of recursive functions
907         // with loops. Consider that there may be N call frames of some
908         // recursive function, for a reasonably large value of N. The top
909         // one triggers optimization, and then returns, and then all of
910         // the others return. We don't want optimization to be triggered on
911         // each return, as that would be superfluous. It only makes sense
912         // to trigger optimization if one of those functions becomes hot
913         // in the baseline code.
914         void optimizeSoon()
915         {
916             m_jitExecuteCounter = Options::executionCounterValueForOptimizeSoon << reoptimizationRetryCounter();
917         }
918         
919         // The speculative JIT tracks its success rate, so that we can
920         // decide when to reoptimize. It's interesting to note that these
921         // counters may overflow without any protection. The success
922         // counter will overflow before the fail one does, because the
923         // fail one is used as a trigger to reoptimize. So the worst case
924         // is that the success counter overflows and we reoptimize without
925         // needing to. But this is harmless. If a method really did
926         // execute 2^32 times then compiling it again probably won't hurt
927         // anyone.
928         
929         void countSpeculationSuccess()
930         {
931             m_speculativeSuccessCounter++;
932         }
933         
934         void countSpeculationFailure()
935         {
936             m_speculativeFailCounter++;
937         }
938         
939         uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
940         uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
941         
942         uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
943         uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
944         
945         static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
946         static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
947
948 #if ENABLE(JIT)
949         // The number of failures that triggers the use of the ratio.
950         unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); }
951         unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); }
952
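        // Reoptimize only once speculation failures are both frequent in absolute
        // terms (largeFailCountThreshold) and large relative to successes
        // (desiredSpeculativeSuccessFailRatio).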
953         bool shouldReoptimizeNow()
954         {
955             return Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThreshold();
956         }
957
958         bool shouldReoptimizeFromLoopNow()
959         {
960             return Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThresholdForLoop();
961         }
962 #endif
963
964 #if ENABLE(VALUE_PROFILER)
965         bool shouldOptimizeNow();
966 #else
967         bool shouldOptimizeNow() { return false; }
968 #endif
969         
970 #if ENABLE(JIT)
971         void reoptimize()
972         {
973             ASSERT(replacement() != this);
974             ASSERT(replacement()->alternative() == this);
975             replacement()->tallyFrequentExitSites();
976             replacement()->jettison();
977             countReoptimization();
978             optimizeAfterWarmUp();
979         }
980 #endif
981
982 #if ENABLE(VERBOSE_VALUE_PROFILE)
983         void dumpValueProfiles();
984 #endif
985         
986         // FIXME: Make these remaining members private.
987
988         int m_numCalleeRegisters;
989         int m_numVars;
990         int m_numCapturedVars;
991         bool m_isConstructor;
992
993         // This is public because otherwise we would have many friends.
994         bool m_shouldDiscardBytecode;
995
996     protected:
997         virtual void visitWeakReferences(SlotVisitor&);
998         virtual void finalizeUnconditionally();
999         
1000     private:
1001         friend class DFGCodeBlocks;
1002         
1003 #if ENABLE(DFG_JIT)
1004         void tallyFrequentExitSites();
1005 #else
1006         void tallyFrequentExitSites() { }
1007 #endif
1008         
1009         void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;
1010
1011         CString registerName(ExecState*, int r) const;
1012         void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1013         void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1014         void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
1015         void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1016         void printCallOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1017         void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
1018         void visitStructures(SlotVisitor&, Instruction* vPC) const;
1019         
1020 #if ENABLE(DFG_JIT)
1021         bool shouldImmediatelyAssumeLivenessDuringScan()
1022         {
1023             // Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
1024             // CodeBlocks don't need to be jettisoned when their weak references go
1025         // stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
1026             // this means that it's live.
1027             if (!m_dfgData)
1028                 return true;
1029             
1030             // For simplicity, we don't attempt to jettison code blocks during GC if
1031             // they are executing. Instead we strongly mark their weak references to
1032             // allow them to continue to execute soundly.
1033             if (m_dfgData->mayBeExecuting)
1034                 return true;
1035
1036             return false;
1037         }
1038 #else
1039         bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
1040 #endif
1041         
1042         void performTracingFixpointIteration(SlotVisitor&);
1043         
1044         void stronglyVisitStrongReferences(SlotVisitor&);
1045         void stronglyVisitWeakReferences(SlotVisitor&);
1046
1047         void createRareDataIfNecessary()
1048         {
1049             if (!m_rareData)
1050                 m_rareData = adoptPtr(new RareData);
1051         }
1052         
1053         int m_numParameters;
1054
1055         WriteBarrier<ScriptExecutable> m_ownerExecutable;
1056         JSGlobalData* m_globalData;
1057
1058         struct Instructions : public RefCounted<Instructions> {
1059             Vector<Instruction> m_instructions;
1060         };
1061         RefPtr<Instructions> m_instructions;
1062         unsigned m_instructionCount;
1063
1064         int m_thisRegister;
1065         int m_argumentsRegister;
1066         int m_activationRegister;
1067
1068         bool m_needsFullScopeChain;
1069         bool m_usesEval;
1070         bool m_isNumericCompareFunction;
1071         bool m_isStrictMode;
1072
1073         CodeType m_codeType;
1074
1075         RefPtr<SourceProvider> m_source;
1076         unsigned m_sourceOffset;
1077
1078 #if ENABLE(CLASSIC_INTERPRETER)
1079         Vector<unsigned> m_propertyAccessInstructions;
1080         Vector<unsigned> m_globalResolveInstructions;
1081 #endif
1082 #if ENABLE(JIT)
1083         Vector<StructureStubInfo> m_structureStubInfos;
1084         Vector<GlobalResolveInfo> m_globalResolveInfos;
1085         Vector<CallLinkInfo> m_callLinkInfos;
1086         Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
1087         JITCode m_jitCode;
1088         MacroAssemblerCodePtr m_jitCodeWithArityCheck;
1089         SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
1090 #endif
1091 #if ENABLE(DFG_JIT)
1092         OwnPtr<CompactJITCodeMap> m_jitCodeMap;
1093         
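        // Records a structure transition that optimized code depends on. Roughly:
        // during GC, m_to is treated as reachable only once m_from (and
        // m_codeOrigin, when present) have been found live.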
1094         struct WeakReferenceTransition {
1095             WeakReferenceTransition() { }
1096             
1097             WeakReferenceTransition(JSGlobalData& globalData, JSCell* owner, JSCell* codeOrigin, JSCell* from, JSCell* to)
1098                 : m_from(globalData, owner, from)
1099                 , m_to(globalData, owner, to)
1100             {
1101                 if (!!codeOrigin)
1102                     m_codeOrigin.set(globalData, owner, codeOrigin);
1103             }
1104
1105             WriteBarrier<JSCell> m_codeOrigin;
1106             WriteBarrier<JSCell> m_from;
1107             WriteBarrier<JSCell> m_to;
1108         };
1109         
1110         struct DFGData {
1111             DFGData()
1112                 : mayBeExecuting(false)
1113                 , isJettisoned(false)
1114             {
1115             }
1116             
1117             Vector<DFG::OSREntryData> osrEntry;
1118             SegmentedVector<DFG::OSRExit, 8> osrExit;
1119             Vector<DFG::SpeculationRecovery> speculationRecovery;
1120             Vector<WeakReferenceTransition> transitions;
1121             Vector<WriteBarrier<JSCell> > weakReferences;
1122             bool mayBeExecuting;
1123             bool isJettisoned;
1124             bool livenessHasBeenProved; // Initialized and used on every GC.
1125             bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
1126         };
1127         
1128         OwnPtr<DFGData> m_dfgData;
1129         
1130         // This is relevant to non-DFG code blocks that serve as the profiled code block
1131         // for DFG code blocks.
1132         DFG::ExitProfile m_exitProfile;
1133 #endif
1134 #if ENABLE(VALUE_PROFILER)
1135         Vector<ValueProfile> m_argumentValueProfiles;
1136         SegmentedVector<ValueProfile, 8> m_valueProfiles;
1137         SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
1138         SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
1139         unsigned m_executionEntryCount;
1140 #endif
1141
1142         Vector<unsigned> m_jumpTargets;
1143         Vector<unsigned> m_loopTargets;
1144
1145         // Constant Pool
1146         Vector<Identifier> m_identifiers;
1147         COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
1148         Vector<WriteBarrier<Unknown> > m_constantRegisters;
1149         Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
1150         Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;
1151
1152         SymbolTable* m_symbolTable;
1153
1154         OwnPtr<CodeBlock> m_alternative;
1155         
1156         int32_t m_jitExecuteCounter;
1157         uint32_t m_speculativeSuccessCounter;
1158         uint32_t m_speculativeFailCounter;
1159         uint8_t m_optimizationDelayCounter;
1160         uint8_t m_reoptimizationRetryCounter;
1161
1162         struct RareData {
1163             WTF_MAKE_FAST_ALLOCATED;
1164         public:
1165             Vector<HandlerInfo> m_exceptionHandlers;
1166
1167             // Rare Constants
1168             Vector<WriteBarrier<RegExp> > m_regexps;
1169
1170             // Buffers used for large array literals
1171             Vector<Vector<JSValue> > m_constantBuffers;
1172             
1173             // Jump Tables
1174             Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
1175             Vector<SimpleJumpTable> m_characterSwitchJumpTables;
1176             Vector<StringJumpTable> m_stringSwitchJumpTables;
1177
1178             EvalCodeCache m_evalCodeCache;
1179
1180             // Expression info - present if debugging.
1181             Vector<ExpressionRangeInfo> m_expressionInfo;
1182             // Line info - present if profiling or debugging.
1183             Vector<LineInfo> m_lineInfo;
1184 #if ENABLE(JIT)
1185             Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
1186 #endif
1187 #if ENABLE(DFG_JIT)
1188             SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
1189             Vector<CodeOriginAtCallReturnOffset> m_codeOrigins;
1190 #endif
1191         };
1192 #if COMPILER(MSVC)
1193         friend void WTF::deleteOwnedPtr<RareData>(RareData*);
1194 #endif
1195         OwnPtr<RareData> m_rareData;
1196 #if ENABLE(JIT)
1197         CompileWithDFGState m_canCompileWithDFGState;
1198 #endif
1199     };
1200
1201     // Program code is not marked by any function, so we make the global object
1202     // responsible for marking it.
1203
1204     class GlobalCodeBlock : public CodeBlock {
1205     protected:
1206         GlobalCodeBlock(CopyParsedBlockTag, GlobalCodeBlock& other)
1207             : CodeBlock(CopyParsedBlock, other, &m_unsharedSymbolTable)
1208             , m_unsharedSymbolTable(other.m_unsharedSymbolTable)
1209         {
1210         }
1211         
1212         GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
1213             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
1214         {
1215         }
1216
1217     private:
1218         SymbolTable m_unsharedSymbolTable;
1219     };
1220
1221     class ProgramCodeBlock : public GlobalCodeBlock {
1222     public:
1223         ProgramCodeBlock(CopyParsedBlockTag, ProgramCodeBlock& other)
1224             : GlobalCodeBlock(CopyParsedBlock, other)
1225         {
1226         }
1227
1228         ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
1229             : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
1230         {
1231         }
1232         
1233 #if ENABLE(JIT)
1234     protected:
1235         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1236         virtual void jettison();
1237         virtual CodeBlock* replacement();
1238         virtual bool canCompileWithDFGInternal();
1239 #endif
1240     };
1241
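         // An EvalCodeBlock holds the code generated for an EvalExecutable. Beyond the usual state it
         // records the scope depth at which the eval appeared and the variables declared by the eval code.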
1242     class EvalCodeBlock : public GlobalCodeBlock {
1243     public:
1244         EvalCodeBlock(CopyParsedBlockTag, EvalCodeBlock& other)
1245             : GlobalCodeBlock(CopyParsedBlock, other)
1246             , m_baseScopeDepth(other.m_baseScopeDepth)
1247             , m_variables(other.m_variables)
1248         {
1249         }
1250         
1251         EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
1252             : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
1253             , m_baseScopeDepth(baseScopeDepth)
1254         {
1255         }
1256
1257         int baseScopeDepth() const { return m_baseScopeDepth; }
1258
1259         const Identifier& variable(unsigned index) { return m_variables[index]; }
1260         unsigned numVariables() { return m_variables.size(); }
1261         void adoptVariables(Vector<Identifier>& variables)
1262         {
1263             ASSERT(m_variables.isEmpty());
1264             m_variables.swap(variables);
1265         }
1266         
1267 #if ENABLE(JIT)
1268     protected:
1269         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1270         virtual void jettison();
1271         virtual CodeBlock* replacement();
1272         virtual bool canCompileWithDFGInternal();
1273 #endif
1274
1275     private:
1276         int m_baseScopeDepth;
1277         Vector<Identifier> m_variables;
1278     };
1279
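         // A FunctionCodeBlock holds the code generated for the call or construct entrypoint of a
         // FunctionExecutable. Its symbol table is a ref-counted SharedSymbolTable so that copies of the
         // block (made via the CopyParsedBlock constructor) can share a single table.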
1280     class FunctionCodeBlock : public CodeBlock {
1281     public:
1282         FunctionCodeBlock(CopyParsedBlockTag, FunctionCodeBlock& other)
1283             : CodeBlock(CopyParsedBlock, other, other.sharedSymbolTable())
1284         {
1285             // Having to ref the shared symbol table by hand here is yucky, but it is
1286             // necessary because of the class hierarchy issues described in the comment
1287             // block for the main constructor, below.
1288             sharedSymbolTable()->ref();
1289         }
1290
1291         // The SharedSymbolTable is not held in a RefPtr the way RefCounted objects usually are: the
1292         // CodeBlock base class has to be initialised before any RefPtr member of this class could be,
1293         // so we create the table, leak its ref, and pass it down as a raw pointer with a ref count
1294         // of 1. We then manually deref it in the destructor.
1295         FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative = nullptr)
1296             : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
1297         {
1298         }
1299         ~FunctionCodeBlock()
1300         {
1301             sharedSymbolTable()->deref();
1302         }
1303         
1304 #if ENABLE(JIT)
1305     protected:
1306         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
1307         virtual void jettison();
1308         virtual CodeBlock* replacement();
1309         virtual bool canCompileWithDFGInternal();
1310 #endif
1311     };
1312
1313     // Use this if you want to copy a code block and you're paranoid about a GC
1314     // happening. A usage sketch follows the class definition.
1315     class BytecodeDestructionBlocker {
1316     public:
1317         BytecodeDestructionBlocker(CodeBlock* codeBlock)
1318             : m_codeBlock(codeBlock)
1319             , m_oldValueOfShouldDiscardBytecode(codeBlock->m_shouldDiscardBytecode)
1320         {
1321             codeBlock->m_shouldDiscardBytecode = false;
1322         }
1323         
1324         ~BytecodeDestructionBlocker()
1325         {
1326             m_codeBlock->m_shouldDiscardBytecode = m_oldValueOfShouldDiscardBytecode;
1327         }
1328         
1329     private:
1330         CodeBlock* m_codeBlock;
1331         bool m_oldValueOfShouldDiscardBytecode;
1332     };
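
         // A minimal usage sketch. The surrounding copy code and the function name are hypothetical and
         // shown purely for illustration; only the blocker itself comes from this header:
         //
         //     void copyWithoutLosingBytecode(FunctionCodeBlock* original)
         //     {
         //         BytecodeDestructionBlocker blocker(original);
         //         // While 'blocker' is live, 'original' will not discard its bytecode, even if a GC runs.
         //         OwnPtr<CodeBlock> copy = adoptPtr(new FunctionCodeBlock(CopyParsedBlock, *original));
         //         // ... hand 'copy' off to its new owner ...
         //     }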
1333
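         // Map a code origin back to the baseline CodeBlock it belongs to: origins produced by inlining
         // resolve to the inlined callee's own baseline block; all other origins resolve to the baseline
         // block that was passed in.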
1334     inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
1335     {
1336         if (codeOrigin.inlineCallFrame) {
1337             ExecutableBase* executable = codeOrigin.inlineCallFrame->executable.get();
1338             ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
1339             return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
1340         }
1341         return baselineCodeBlock;
1342     }
1343     
1344
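         // Resolve a register index for this call frame. Constant registers are materialised out of the
         // owning CodeBlock's constant pool; every other index addresses the call frame directly.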
1345     inline Register& ExecState::r(int index)
1346     {
1347         CodeBlock* codeBlock = this->codeBlock();
1348         if (codeBlock->isConstantRegisterIndex(index))
1349             return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
1350         return this[index];
1351     }
1352
1353     inline Register& ExecState::uncheckedR(int index)
1354     {
1355         ASSERT(index < FirstConstantRegisterIndex);
1356         return this[index];
1357     }
1358
1359 #if ENABLE(DFG_JIT)
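         // Only DFG-compiled code can contain inlined call frames, so any other kind of frame can answer
         // "no" without taking the slow path.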
1360     inline bool ExecState::isInlineCallFrame()
1361     {
1362         if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
1363             return false;
1364         return isInlineCallFrameSlow();
1365     }
1366 #endif
1367
1368 #if ENABLE(DFG_JIT)
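         // If the candidate pointer is one of the DFG code blocks this set tracks, flag that block as
         // possibly executing. The pointer may be an arbitrary scanned value, hence the guard against the
         // hash table's special marker values below.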
1369     inline void DFGCodeBlocks::mark(void* candidateCodeBlock)
1370     {
1371         // We have to check for 0 and -1 because those are used by the HashMap as markers.
1372         uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
1373         
1374         // This checks for both of those nasty cases in one go.
1375         // 0 + 1 = 1
1376         // -1 + 1 = 0
1377         if (value + 1 <= 1)
1378             return;
1379         
1380         HashSet<CodeBlock*>::iterator iter = m_set.find(static_cast<CodeBlock*>(candidateCodeBlock));
1381         if (iter == m_set.end())
1382             return;
1383         
1384         (*iter)->m_dfgData->mayBeExecuting = true;
1385     }
1386 #endif
1387     
1388 } // namespace JSC
1389
1390 #endif // CodeBlock_h